From 062ddcfb05b736524f02051780c4e5cb4988248b Mon Sep 17 00:00:00 2001 From: xinhuili Date: Wed, 14 Aug 2019 14:27:15 +0800 Subject: [PATCH] Customise prometheus This patch is to customise prometheus. Signed-off-by: XINHUI LI Change-Id: Ifa841d4df72f0b6f8971aba93bce47fd04815b43 --- src/prometheus/.circleci/config.yml | 119 + src/prometheus/.dockerignore | 5 + src/prometheus/.github/ISSUE_TEMPLATE.md | 55 + src/prometheus/.gitignore | 23 + src/prometheus/.promu.yml | 49 + src/prometheus/.travis.yml | 12 + src/prometheus/CHANGELOG.md | 1120 + src/prometheus/CONTRIBUTING.md | 53 + src/prometheus/Dockerfile | 23 + src/prometheus/LICENSE | 201 + src/prometheus/MAINTAINERS.md | 6 + src/prometheus/Makefile | 34 + src/prometheus/Makefile.common | 100 + src/prometheus/NOTICE | 87 + src/prometheus/README.md | 105 + src/prometheus/VERSION | 1 + src/prometheus/cmd/prometheus/fdlimits_default.go | 32 + src/prometheus/cmd/prometheus/fdlimits_windows.go | 21 + src/prometheus/cmd/prometheus/main.go | 698 + src/prometheus/cmd/prometheus/main_test.go | 157 + src/prometheus/cmd/prometheus/uname_default.go | 23 + src/prometheus/cmd/prometheus/uname_linux.go | 36 + src/prometheus/cmd/prometheus/uname_linux_int8.go | 28 + src/prometheus/cmd/prometheus/uname_linux_uint8.go | 28 + src/prometheus/cmd/promtool/main.go | 449 + src/prometheus/code-of-conduct.md | 3 + src/prometheus/config/config.go | 693 + src/prometheus/config/config_default_test.go | 28 + src/prometheus/config/config_test.go | 796 + src/prometheus/config/config_windows_test.go | 26 + src/prometheus/config/testdata/bearertoken.bad.yml | 6 + .../config/testdata/bearertoken_basicauth.bad.yml | 8 + src/prometheus/config/testdata/conf.good.yml | 242 + .../testdata/ec2_filters_empty_values.bad.yml | 9 + src/prometheus/config/testdata/first.rules | 10 + .../config/testdata/global_timeout.good.yml | 3 + src/prometheus/config/testdata/jobname.bad.yml | 2 + src/prometheus/config/testdata/jobname_dup.bad.yml | 5 + 
.../config/testdata/kubernetes_bearertoken.bad.yml | 10 + .../kubernetes_bearertoken_basicauth.bad.yml | 12 + .../kubernetes_namespace_discovery.bad.yml | 6 + .../config/testdata/kubernetes_role.bad.yml | 5 + src/prometheus/config/testdata/labeldrop.bad.yml | 5 + src/prometheus/config/testdata/labeldrop2.bad.yml | 5 + src/prometheus/config/testdata/labeldrop3.bad.yml | 5 + src/prometheus/config/testdata/labeldrop4.bad.yml | 5 + src/prometheus/config/testdata/labeldrop5.bad.yml | 5 + src/prometheus/config/testdata/labelkeep.bad.yml | 5 + src/prometheus/config/testdata/labelkeep2.bad.yml | 5 + src/prometheus/config/testdata/labelkeep3.bad.yml | 5 + src/prometheus/config/testdata/labelkeep4.bad.yml | 5 + src/prometheus/config/testdata/labelkeep5.bad.yml | 5 + src/prometheus/config/testdata/labelmap.bad.yml | 5 + src/prometheus/config/testdata/labelname.bad.yml | 3 + src/prometheus/config/testdata/labelname2.bad.yml | 3 + .../marathon_authtoken_authtokenfile.bad.yml | 9 + .../testdata/marathon_authtoken_basicauth.bad.yml | 11 + .../marathon_authtoken_bearertoken.bad.yml | 9 + .../config/testdata/marathon_no_servers.bad.yml | 10 + .../config/testdata/modulus_missing.bad.yml | 5 + src/prometheus/config/testdata/regex.bad.yml | 4 + .../testdata/remote_read_url_missing.bad.yml | 2 + .../testdata/remote_write_url_missing.bad.yml | 2 + src/prometheus/config/testdata/rules.bad.yml | 3 + .../config/testdata/rules_abs_path.good.yml | 4 + .../testdata/rules_abs_path_windows.good.yml | 4 + .../config/testdata/scrape_interval.bad.yml | 4 + .../config/testdata/section_key_dup.bad.yml | 5 + .../config/testdata/static_config.bad.json | 7 + .../config/testdata/static_config.bad.yml | 4 + .../testdata/target_label_hashmod_missing.bad.yml | 6 + .../config/testdata/target_label_missing.bad.yml | 4 + .../config/testdata/unknown_attr.bad.yml | 20 + .../config/testdata/unknown_global_attr.bad.yml | 2 + .../config/testdata/url_in_targetgroup.bad.yml | 5 + 
src/prometheus/console_libraries/menu.lib | 76 + src/prometheus/console_libraries/prom.lib | 135 + src/prometheus/consoles/index.html.example | 28 + src/prometheus/consoles/node-cpu.html | 60 + src/prometheus/consoles/node-disk.html | 77 + src/prometheus/consoles/node-overview.html | 122 + src/prometheus/consoles/node.html | 34 + src/prometheus/consoles/prometheus-overview.html | 95 + src/prometheus/consoles/prometheus.html | 33 + src/prometheus/discovery/README.md | 219 + src/prometheus/discovery/azure/azure.go | 317 + src/prometheus/discovery/config/config.go | 65 + src/prometheus/discovery/consul/consul.go | 517 + src/prometheus/discovery/consul/consul_test.go | 207 + src/prometheus/discovery/dns/dns.go | 335 + src/prometheus/discovery/ec2/ec2.go | 287 + src/prometheus/discovery/file/file.go | 409 + src/prometheus/discovery/file/file_test.go | 172 + .../discovery/file/fixtures/invalid_nil.json | 9 + .../discovery/file/fixtures/invalid_nil.yml | 5 + src/prometheus/discovery/file/fixtures/valid.json | 11 + src/prometheus/discovery/file/fixtures/valid.yml | 5 + src/prometheus/discovery/gce/gce.go | 265 + src/prometheus/discovery/kubernetes/endpoints.go | 351 + .../discovery/kubernetes/endpoints_test.go | 605 + src/prometheus/discovery/kubernetes/ingress.go | 197 + .../discovery/kubernetes/ingress_test.go | 157 + src/prometheus/discovery/kubernetes/kubernetes.go | 390 + .../discovery/kubernetes/kubernetes_test.go | 236 + src/prometheus/discovery/kubernetes/node.go | 215 + src/prometheus/discovery/kubernetes/node_test.go | 173 + src/prometheus/discovery/kubernetes/pod.go | 256 + src/prometheus/discovery/kubernetes/pod_test.go | 319 + src/prometheus/discovery/kubernetes/service.go | 180 + .../discovery/kubernetes/service_test.go | 204 + src/prometheus/discovery/manager.go | 308 + src/prometheus/discovery/manager_test.go | 857 + src/prometheus/discovery/marathon/marathon.go | 448 + src/prometheus/discovery/marathon/marathon_test.go | 577 + 
src/prometheus/discovery/openstack/hypervisor.go | 145 + .../discovery/openstack/hypervisor_test.go | 82 + src/prometheus/discovery/openstack/instance.go | 215 + .../discovery/openstack/instance_test.go | 85 + src/prometheus/discovery/openstack/mock.go | 565 + src/prometheus/discovery/openstack/openstack.go | 153 + .../discovery/targetgroup/targetgroup.go | 93 + .../discovery/targetgroup/targetgroup_test.go | 48 + src/prometheus/discovery/triton/triton.go | 226 + src/prometheus/discovery/triton/triton_test.go | 183 + src/prometheus/discovery/zookeeper/zookeeper.go | 278 + .../docs/configuration/alerting_rules.md | 103 + src/prometheus/docs/configuration/configuration.md | 1267 + src/prometheus/docs/configuration/index.md | 4 + .../docs/configuration/recording_rules.md | 120 + .../docs/configuration/template_examples.md | 116 + .../docs/configuration/template_reference.md | 114 + src/prometheus/docs/federation.md | 81 + src/prometheus/docs/getting_started.md | 267 + src/prometheus/docs/images/remote_integrations.png | Bin 0 -> 14508 bytes src/prometheus/docs/index.md | 20 + src/prometheus/docs/installation.md | 96 + src/prometheus/docs/migration.md | 201 + src/prometheus/docs/querying/api.md | 515 + src/prometheus/docs/querying/basics.md | 226 + src/prometheus/docs/querying/examples.md | 86 + src/prometheus/docs/querying/functions.md | 402 + src/prometheus/docs/querying/index.md | 4 + src/prometheus/docs/querying/operators.md | 256 + src/prometheus/docs/stability.md | 38 + src/prometheus/docs/storage.md | 88 + src/prometheus/documentation/dev/api/swagger.json | 140 + .../documentation/examples/custom-sd/README.md | 18 + .../examples/custom-sd/adapter-usage/main.go | 251 + .../examples/custom-sd/adapter/adapter.go | 151 + .../examples/kubernetes-rabbitmq/README.md | 28 + .../examples/kubernetes-rabbitmq/rc.yml | 27 + .../examples/kubernetes-rabbitmq/svc.yml | 14 + .../examples/prometheus-kubernetes.yml | 281 + .../documentation/examples/prometheus.yml | 29 + 
.../documentation/examples/rbac-setup.yml | 39 + .../remote_storage/example_write_adapter/README.md | 24 + .../remote_storage/example_write_adapter/server.go | 62 + .../remote_storage_adapter/README.md | 55 + .../remote_storage_adapter/graphite/client.go | 109 + .../remote_storage_adapter/graphite/client_test.go | 57 + .../remote_storage_adapter/graphite/escape.go | 103 + .../remote_storage_adapter/influxdb/client.go | 322 + .../remote_storage_adapter/influxdb/client_test.go | 111 + .../remote_storage/remote_storage_adapter/main.go | 326 + .../remote_storage_adapter/opentsdb/client.go | 138 + .../remote_storage_adapter/opentsdb/client_test.go | 75 + .../remote_storage_adapter/opentsdb/tagvalue.go | 157 + .../opentsdb/tagvalue_test.go | 64 + .../documentation/images/architecture.svg | 2 + .../documentation/images/architecture.xml | 1 + .../documentation/images/diagram_note.md | 7 + src/prometheus/notifier/notifier.go | 676 + src/prometheus/notifier/notifier_test.go | 538 + src/prometheus/pkg/labels/labels.go | 272 + src/prometheus/pkg/labels/matcher.go | 88 + src/prometheus/pkg/labels/matcher_test.go | 89 + src/prometheus/pkg/pool/pool.go | 87 + src/prometheus/pkg/relabel/relabel.go | 115 + src/prometheus/pkg/relabel/relabel_test.go | 420 + src/prometheus/pkg/rulefmt/rulefmt.go | 159 + src/prometheus/pkg/rulefmt/rulefmt_test.go | 81 + .../pkg/rulefmt/testdata/bad_annotation.bad.yaml | 7 + .../pkg/rulefmt/testdata/bad_expr.bad.yaml | 5 + .../pkg/rulefmt/testdata/bad_lname.bad.yaml | 7 + .../pkg/rulefmt/testdata/duplicate_grp.bad.yaml | 3 + .../rulefmt/testdata/invalid_record_name.bad.yaml | 5 + .../pkg/rulefmt/testdata/no_rec_alert.bad.yaml | 4 + .../pkg/rulefmt/testdata/noexpr.bad.yaml | 4 + .../pkg/rulefmt/testdata/record_and_alert.bad.yaml | 6 + src/prometheus/pkg/rulefmt/testdata/test.yaml | 64 + src/prometheus/pkg/textparse/lex.l | 129 + src/prometheus/pkg/textparse/lex.l.go | 567 + src/prometheus/pkg/textparse/parse.go | 151 + 
src/prometheus/pkg/textparse/parse_test.go | 403 + src/prometheus/pkg/textparse/testdata.nometa.txt | 410 + src/prometheus/pkg/textparse/testdata.txt | 528 + src/prometheus/pkg/timestamp/timestamp.go | 26 + src/prometheus/pkg/value/value.go | 34 + src/prometheus/prompb/remote.pb.go | 1030 + src/prometheus/prompb/remote.proto | 44 + src/prometheus/prompb/rpc.pb.go | 1054 + src/prometheus/prompb/rpc.pb.gw.go | 212 + src/prometheus/prompb/rpc.proto | 76 + src/prometheus/prompb/types.pb.go | 1242 + src/prometheus/prompb/types.proto | 56 + src/prometheus/promql/ast.go | 316 + src/prometheus/promql/bench_test.go | 195 + src/prometheus/promql/engine.go | 1740 + src/prometheus/promql/engine_test.go | 382 + src/prometheus/promql/functions.go | 1269 + src/prometheus/promql/functions_test.go | 53 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_1 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_10 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_11 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_12 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_13 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_14 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_15 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_16 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_17 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_18 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_19 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_2 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_20 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_21 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_22 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_23 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_24 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_25 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_26 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_27 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_28 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_29 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_3 | 1 + 
.../fuzz-data/ParseExpr/corpus/from_tests_30 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_31 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_32 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_33 | 1 + .../fuzz-data/ParseExpr/corpus/from_tests_34 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_4 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_5 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_6 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_7 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_8 | 1 + .../promql/fuzz-data/ParseExpr/corpus/from_tests_9 | 1 + .../982cbe5ad899f03c630b1a21876a206707ea3dc9 | 1 + .../ParseMetric/corpus/exposition_formats_0 | 3 + .../ParseMetric/corpus/exposition_formats_1 | 1 + .../ParseMetric/corpus/exposition_formats_2 | 1 + .../ParseMetric/corpus/exposition_formats_3 | 1 + .../ParseMetric/corpus/exposition_formats_4 | 1 + .../ParseMetric/corpus/exposition_formats_5 | 1 + .../ParseMetric/corpus/exposition_formats_6 | 1 + .../ParseMetric/corpus/exposition_formats_7 | 1 + src/prometheus/promql/fuzz.go | 92 + src/prometheus/promql/lex.go | 906 + src/prometheus/promql/lex_test.go | 488 + src/prometheus/promql/parse.go | 1139 + src/prometheus/promql/parse_test.go | 1817 + src/prometheus/promql/printer.go | 234 + src/prometheus/promql/printer_test.go | 163 + src/prometheus/promql/promql_test.go | 37 + src/prometheus/promql/quantile.go | 183 + src/prometheus/promql/test.go | 546 + src/prometheus/promql/testdata/aggregators.test | 251 + src/prometheus/promql/testdata/functions.test | 497 + src/prometheus/promql/testdata/histograms.test | 159 + src/prometheus/promql/testdata/legacy.test | 359 + src/prometheus/promql/testdata/literals.test | 56 + src/prometheus/promql/testdata/operators.test | 369 + src/prometheus/promql/testdata/selectors.test | 59 + src/prometheus/promql/testdata/staleness.test | 51 + src/prometheus/promql/value.go | 216 + src/prometheus/relabel/relabel.go | 117 + 
src/prometheus/relabel/relabel_test.go | 419 + src/prometheus/rules/alerting.go | 374 + src/prometheus/rules/alerting_test.go | 39 + src/prometheus/rules/manager.go | 645 + src/prometheus/rules/manager_test.go | 314 + src/prometheus/rules/recording.go | 132 + src/prometheus/rules/recording_test.go | 84 + src/prometheus/scrape/helpers_test.go | 70 + src/prometheus/scrape/manager.go | 173 + src/prometheus/scrape/manager_test.go | 267 + src/prometheus/scrape/scrape.go | 1081 + src/prometheus/scrape/scrape_test.go | 1265 + src/prometheus/scrape/target.go | 380 + src/prometheus/scrape/target_test.go | 372 + src/prometheus/scrape/testdata/bearertoken.txt | 1 + src/prometheus/scrape/testdata/ca.cer | 22 + src/prometheus/scrape/testdata/ca.key | 27 + src/prometheus/scrape/testdata/client.cer | 25 + src/prometheus/scrape/testdata/client.key | 51 + src/prometheus/scrape/testdata/server.cer | 20 + src/prometheus/scrape/testdata/server.key | 27 + src/prometheus/scrape/testdata/servername.cer | 20 + src/prometheus/scrape/testdata/servername.key | 27 + src/prometheus/scripts/genproto.sh | 49 + src/prometheus/storage/buffer.go | 237 + src/prometheus/storage/buffer_test.go | 257 + src/prometheus/storage/fanout.go | 499 + src/prometheus/storage/fanout_test.go | 294 + src/prometheus/storage/interface.go | 118 + src/prometheus/storage/noop.go | 54 + src/prometheus/storage/remote/client.go | 176 + src/prometheus/storage/remote/client_test.go | 84 + src/prometheus/storage/remote/codec.go | 383 + src/prometheus/storage/remote/codec_test.go | 147 + src/prometheus/storage/remote/ewma.go | 68 + src/prometheus/storage/remote/queue_manager.go | 542 + .../storage/remote/queue_manager_test.go | 330 + src/prometheus/storage/remote/read.go | 226 + src/prometheus/storage/remote/read_test.go | 327 + src/prometheus/storage/remote/storage.go | 168 + src/prometheus/storage/remote/write.go | 55 + src/prometheus/storage/tsdb/tsdb.go | 292 + src/prometheus/template/template.go | 317 + 
src/prometheus/template/template_test.go | 283 + src/prometheus/util/httputil/compression.go | 92 + src/prometheus/util/promlint/promlint.go | 268 + src/prometheus/util/promlint/promlint_test.go | 497 + src/prometheus/util/stats/query_stats.go | 89 + src/prometheus/util/stats/stats_test.go | 61 + src/prometheus/util/stats/timer.go | 113 + src/prometheus/util/strutil/quote.go | 223 + src/prometheus/util/strutil/quote_test.go | 125 + src/prometheus/util/strutil/strconv.go | 44 + src/prometheus/util/strutil/strconv_test.go | 49 + src/prometheus/util/testutil/directory.go | 129 + src/prometheus/util/testutil/error.go | 27 + src/prometheus/util/testutil/roundtrip.go | 47 + src/prometheus/util/testutil/storage.go | 56 + src/prometheus/util/testutil/testing.go | 63 + src/prometheus/util/treecache/treecache.go | 293 + .../github.com/Azure/azure-sdk-for-go/LICENSE | 202 + .../arm/compute/availabilitysets.go | 366 + .../Azure/azure-sdk-for-go/arm/compute/client.go | 58 + .../Azure/azure-sdk-for-go/arm/compute/models.go | 1180 + .../arm/compute/usageoperations.go | 134 + .../Azure/azure-sdk-for-go/arm/compute/version.go | 43 + .../arm/compute/virtualmachineextensionimages.go | 238 + .../arm/compute/virtualmachineextensions.go | 261 + .../arm/compute/virtualmachineimages.go | 362 + .../arm/compute/virtualmachines.go | 984 + .../arm/compute/virtualmachinescalesets.go | 1096 + .../arm/compute/virtualmachinescalesetvms.go | 689 + .../arm/compute/virtualmachinesizes.go | 111 + .../arm/network/applicationgateways.go | 635 + .../Azure/azure-sdk-for-go/arm/network/client.go | 130 + .../network/expressroutecircuitauthorizations.go | 343 + .../arm/network/expressroutecircuitpeerings.go | 339 + .../arm/network/expressroutecircuits.go | 761 + .../arm/network/expressrouteserviceproviders.go | 129 + .../azure-sdk-for-go/arm/network/interfaces.go | 821 + .../azure-sdk-for-go/arm/network/loadbalancers.go | 419 + .../arm/network/localnetworkgateways.go | 336 + 
.../Azure/azure-sdk-for-go/arm/network/models.go | 2148 + .../arm/network/publicipaddresses.go | 448 + .../Azure/azure-sdk-for-go/arm/network/routes.go | 337 + .../azure-sdk-for-go/arm/network/routetables.go | 424 + .../azure-sdk-for-go/arm/network/securitygroups.go | 432 + .../azure-sdk-for-go/arm/network/securityrules.go | 353 + .../Azure/azure-sdk-for-go/arm/network/subnets.go | 360 + .../Azure/azure-sdk-for-go/arm/network/usages.go | 136 + .../Azure/azure-sdk-for-go/arm/network/version.go | 43 + .../network/virtualnetworkgatewayconnections.go | 555 + .../arm/network/virtualnetworkgateways.go | 477 + .../arm/network/virtualnetworkpeerings.go | 342 + .../arm/network/virtualnetworks.go | 488 + .../vendor/github.com/Azure/go-autorest/LICENSE | 191 + .../Azure/go-autorest/autorest/autorest.go | 114 + .../Azure/go-autorest/autorest/azure/async.go | 307 + .../Azure/go-autorest/autorest/azure/azure.go | 180 + .../Azure/go-autorest/autorest/azure/config.go | 13 + .../go-autorest/autorest/azure/devicetoken.go | 193 + .../go-autorest/autorest/azure/environments.go | 157 + .../Azure/go-autorest/autorest/azure/persist.go | 59 + .../Azure/go-autorest/autorest/azure/token.go | 363 + .../Azure/go-autorest/autorest/client.go | 212 + .../Azure/go-autorest/autorest/date/date.go | 82 + .../Azure/go-autorest/autorest/date/time.go | 89 + .../Azure/go-autorest/autorest/date/timerfc1123.go | 86 + .../Azure/go-autorest/autorest/date/utility.go | 11 + .../github.com/Azure/go-autorest/autorest/error.go | 77 + .../Azure/go-autorest/autorest/preparer.go | 433 + .../Azure/go-autorest/autorest/responder.go | 208 + .../Azure/go-autorest/autorest/sender.go | 267 + .../Azure/go-autorest/autorest/to/convert.go | 133 + .../Azure/go-autorest/autorest/utility.go | 178 + .../go-autorest/autorest/validation/validation.go | 373 + .../Azure/go-autorest/autorest/version.go | 18 + .../vendor/github.com/PuerkitoBio/purell/LICENSE | 12 + .../vendor/github.com/PuerkitoBio/purell/purell.go | 375 + 
.../vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 + .../vendor/github.com/PuerkitoBio/urlesc/urlesc.go | 180 + .../vendor/github.com/alecthomas/template/LICENSE | 27 + .../github.com/alecthomas/template/README.md | 25 + .../vendor/github.com/alecthomas/template/doc.go | 406 + .../vendor/github.com/alecthomas/template/exec.go | 845 + .../vendor/github.com/alecthomas/template/funcs.go | 598 + .../github.com/alecthomas/template/helper.go | 108 + .../github.com/alecthomas/template/parse/lex.go | 556 + .../github.com/alecthomas/template/parse/node.go | 834 + .../github.com/alecthomas/template/parse/parse.go | 700 + .../github.com/alecthomas/template/template.go | 218 + .../vendor/github.com/alecthomas/units/COPYING | 19 + .../vendor/github.com/alecthomas/units/README.md | 11 + .../vendor/github.com/alecthomas/units/bytes.go | 83 + .../vendor/github.com/alecthomas/units/doc.go | 13 + .../vendor/github.com/alecthomas/units/si.go | 26 + .../vendor/github.com/alecthomas/units/util.go | 138 + .../vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + .../vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../github.com/aws/aws-sdk-go/aws/awserr/error.go | 145 + .../github.com/aws/aws-sdk-go/aws/awserr/types.go | 194 + .../github.com/aws/aws-sdk-go/aws/awsutil/copy.go | 108 + .../github.com/aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 107 + .../aws/aws-sdk-go/aws/awsutil/string_value.go | 89 + .../github.com/aws/aws-sdk-go/aws/client/client.go | 137 + .../aws/aws-sdk-go/aws/client/default_retryer.go | 90 + .../aws-sdk-go/aws/client/metadata/client_info.go | 12 + .../vendor/github.com/aws/aws-sdk-go/aws/config.go | 419 + .../github.com/aws/aws-sdk-go/aws/convert_types.go | 369 + .../aws/aws-sdk-go/aws/corehandlers/handlers.go | 182 + .../aws-sdk-go/aws/corehandlers/param_validator.go | 17 + .../aws-sdk-go/aws/credentials/chain_provider.go | 100 + 
.../aws/aws-sdk-go/aws/credentials/credentials.go | 223 + .../credentials/ec2rolecreds/ec2_role_provider.go | 178 + .../aws/credentials/endpointcreds/provider.go | 191 + .../aws/aws-sdk-go/aws/credentials/env_provider.go | 77 + .../aws/aws-sdk-go/aws/credentials/example.ini | 12 + .../aws/credentials/shared_credentials_provider.go | 151 + .../aws-sdk-go/aws/credentials/static_provider.go | 57 + .../credentials/stscreds/assume_role_provider.go | 161 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 130 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 162 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 + .../vendor/github.com/aws/aws-sdk-go/aws/errors.go | 17 + .../vendor/github.com/aws/aws-sdk-go/aws/logger.go | 112 + .../aws/aws-sdk-go/aws/request/handlers.go | 187 + .../aws/aws-sdk-go/aws/request/http_request.go | 24 + .../aws/aws-sdk-go/aws/request/offset_reader.go | 58 + .../aws/aws-sdk-go/aws/request/request.go | 344 + .../aws-sdk-go/aws/request/request_pagination.go | 104 + .../aws/aws-sdk-go/aws/request/retryer.go | 101 + .../aws/aws-sdk-go/aws/request/validation.go | 234 + .../github.com/aws/aws-sdk-go/aws/session/doc.go | 223 + .../aws/aws-sdk-go/aws/session/env_config.go | 188 + .../aws/aws-sdk-go/aws/session/session.go | 393 + .../aws/aws-sdk-go/aws/session/shared_config.go | 295 + .../aws/aws-sdk-go/aws/signer/v4/header_rules.go | 82 + .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 + .../aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go | 24 + .../github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 713 + .../vendor/github.com/aws/aws-sdk-go/aws/types.go | 106 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../aws/aws-sdk-go/private/endpoints/endpoints.go | 70 + .../aws-sdk-go/private/endpoints/endpoints.json | 82 + .../aws-sdk-go/private/endpoints/endpoints_map.go | 95 + .../aws-sdk-go/private/protocol/ec2query/build.go | 35 + .../private/protocol/ec2query/unmarshal.go | 63 + .../aws/aws-sdk-go/private/protocol/idempotency.go | 75 + 
.../aws/aws-sdk-go/private/protocol/query/build.go | 36 + .../private/protocol/query/queryutil/queryutil.go | 230 + .../aws-sdk-go/private/protocol/query/unmarshal.go | 35 + .../private/protocol/query/unmarshal_error.go | 66 + .../aws/aws-sdk-go/private/protocol/rest/build.go | 256 + .../aws-sdk-go/private/protocol/rest/payload.go | 45 + .../aws-sdk-go/private/protocol/rest/unmarshal.go | 198 + .../aws/aws-sdk-go/private/protocol/unmarshal.go | 21 + .../private/protocol/xml/xmlutil/build.go | 293 + .../private/protocol/xml/xmlutil/unmarshal.go | 260 + .../private/protocol/xml/xmlutil/xml_to_struct.go | 105 + .../aws/aws-sdk-go/private/waiter/waiter.go | 134 + .../github.com/aws/aws-sdk-go/service/ec2/api.go | 49964 +++++++++++ .../aws/aws-sdk-go/service/ec2/customizations.go | 59 + .../aws/aws-sdk-go/service/ec2/service.go | 89 + .../aws/aws-sdk-go/service/ec2/waiters.go | 1027 + .../github.com/aws/aws-sdk-go/service/sts/api.go | 2242 + .../aws/aws-sdk-go/service/sts/customizations.go | 12 + .../aws/aws-sdk-go/service/sts/service.go | 130 + .../vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/exampledata.txt | 2388 + .../github.com/beorn7/perks/quantile/stream.go | 292 + .../vendor/github.com/cespare/xxhash/LICENSE.txt | 22 + .../vendor/github.com/cespare/xxhash/README.md | 34 + .../vendor/github.com/cespare/xxhash/xxhash.go | 180 + .../github.com/cespare/xxhash/xxhash_amd64.go | 12 + .../github.com/cespare/xxhash/xxhash_amd64.s | 233 + .../github.com/cespare/xxhash/xxhash_other.go | 75 + .../github.com/cockroachdb/cmux/CONTRIBUTORS | 11 + .../vendor/github.com/cockroachdb/cmux/LICENSE | 202 + .../vendor/github.com/cockroachdb/cmux/README.md | 72 + .../vendor/github.com/cockroachdb/cmux/buffer.go | 49 + .../vendor/github.com/cockroachdb/cmux/cmux.go | 224 + .../vendor/github.com/cockroachdb/cmux/doc.go | 18 + .../vendor/github.com/cockroachdb/cmux/matchers.go | 164 + .../vendor/github.com/cockroachdb/cmux/patricia.go | 179 + 
.../github.com/cockroachdb/cockroach/LICENSE | 419 + .../cockroach/pkg/util/httputil/http.go | 95 + .../cockroach/pkg/util/protoutil/clone.go | 117 + .../cockroach/pkg/util/protoutil/jsonpb_marshal.go | 128 + .../cockroach/pkg/util/protoutil/marshal.go | 31 + .../cockroach/pkg/util/protoutil/marshaler.go | 96 + .../cockroach/pkg/util/syncutil/mutex_deadlock.go | 47 + .../cockroach/pkg/util/syncutil/mutex_sync.go | 92 + .../vendor/github.com/davecgh/go-spew/LICENSE | 13 + .../github.com/davecgh/go-spew/spew/bypass.go | 151 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../github.com/davecgh/go-spew/spew/config.go | 297 + .../vendor/github.com/davecgh/go-spew/spew/doc.go | 202 + .../vendor/github.com/davecgh/go-spew/spew/dump.go | 509 + .../github.com/davecgh/go-spew/spew/format.go | 419 + .../vendor/github.com/davecgh/go-spew/spew/spew.go | 148 + .../vendor/github.com/dgrijalva/jwt-go/LICENSE | 8 + .../github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 + .../vendor/github.com/dgrijalva/jwt-go/README.md | 85 + .../github.com/dgrijalva/jwt-go/VERSION_HISTORY.md | 105 + .../vendor/github.com/dgrijalva/jwt-go/claims.go | 134 + .../vendor/github.com/dgrijalva/jwt-go/doc.go | 4 + .../vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 147 + .../github.com/dgrijalva/jwt-go/ecdsa_utils.go | 67 + .../vendor/github.com/dgrijalva/jwt-go/errors.go | 59 + .../vendor/github.com/dgrijalva/jwt-go/hmac.go | 94 + .../github.com/dgrijalva/jwt-go/map_claims.go | 94 + .../vendor/github.com/dgrijalva/jwt-go/none.go | 52 + .../vendor/github.com/dgrijalva/jwt-go/parser.go | 131 + .../vendor/github.com/dgrijalva/jwt-go/rsa.go | 100 + .../vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126 + .../github.com/dgrijalva/jwt-go/rsa_utils.go | 69 + .../github.com/dgrijalva/jwt-go/signing_method.go | 35 + .../vendor/github.com/dgrijalva/jwt-go/token.go | 108 + .../vendor/github.com/docker/distribution/LICENSE | 202 + .../docker/distribution/digest/digest.go | 139 + 
.../docker/distribution/digest/digester.go | 155 + .../github.com/docker/distribution/digest/doc.go | 42 + .../github.com/docker/distribution/digest/set.go | 245 + .../docker/distribution/digest/verifiers.go | 44 + .../docker/distribution/reference/reference.go | 334 + .../docker/distribution/reference/regexp.go | 124 + .../github.com/emicklei/go-restful/CHANGES.md | 163 + .../vendor/github.com/emicklei/go-restful/LICENSE | 22 + .../vendor/github.com/emicklei/go-restful/Srcfile | 1 + .../github.com/emicklei/go-restful/compress.go | 123 + .../emicklei/go-restful/compressor_cache.go | 103 + .../emicklei/go-restful/compressor_pools.go | 91 + .../github.com/emicklei/go-restful/compressors.go | 53 + .../github.com/emicklei/go-restful/constants.go | 30 + .../github.com/emicklei/go-restful/container.go | 361 + .../github.com/emicklei/go-restful/cors_filter.go | 202 + .../vendor/github.com/emicklei/go-restful/curly.go | 162 + .../github.com/emicklei/go-restful/curly_route.go | 52 + .../vendor/github.com/emicklei/go-restful/doc.go | 196 + .../emicklei/go-restful/entity_accessors.go | 163 + .../github.com/emicklei/go-restful/filter.go | 26 + .../github.com/emicklei/go-restful/jsr311.go | 248 + .../github.com/emicklei/go-restful/log/log.go | 31 + .../github.com/emicklei/go-restful/logger.go | 32 + .../vendor/github.com/emicklei/go-restful/mime.go | 45 + .../emicklei/go-restful/options_filter.go | 26 + .../github.com/emicklei/go-restful/parameter.go | 114 + .../emicklei/go-restful/path_expression.go | 69 + .../github.com/emicklei/go-restful/request.go | 131 + .../github.com/emicklei/go-restful/response.go | 235 + .../vendor/github.com/emicklei/go-restful/route.go | 183 + .../emicklei/go-restful/route_builder.go | 240 + .../github.com/emicklei/go-restful/router.go | 18 + .../emicklei/go-restful/service_error.go | 23 + .../emicklei/go-restful/swagger/CHANGES.md | 43 + .../go-restful/swagger/api_declaration_list.go | 64 + .../emicklei/go-restful/swagger/config.go | 38 + 
.../emicklei/go-restful/swagger/model_builder.go | 449 + .../emicklei/go-restful/swagger/model_list.go | 86 + .../go-restful/swagger/model_property_ext.go | 66 + .../go-restful/swagger/model_property_list.go | 87 + .../go-restful/swagger/ordered_route_map.go | 36 + .../emicklei/go-restful/swagger/swagger.go | 185 + .../emicklei/go-restful/swagger/swagger_builder.go | 21 + .../go-restful/swagger/swagger_webservice.go | 440 + .../github.com/emicklei/go-restful/web_service.go | 268 + .../emicklei/go-restful/web_service_container.go | 39 + .../vendor/github.com/ghodss/yaml/LICENSE | 50 + .../vendor/github.com/ghodss/yaml/fields.go | 497 + .../vendor/github.com/ghodss/yaml/yaml.go | 277 + .../vendor/github.com/go-ini/ini/LICENSE | 191 + .../vendor/github.com/go-ini/ini/Makefile | 12 + .../vendor/github.com/go-ini/ini/README.md | 703 + .../vendor/github.com/go-ini/ini/README_ZH.md | 690 + .../vendor/github.com/go-ini/ini/error.go | 32 + src/prometheus/vendor/github.com/go-ini/ini/ini.go | 501 + src/prometheus/vendor/github.com/go-ini/ini/key.go | 633 + .../vendor/github.com/go-ini/ini/parser.go | 325 + .../vendor/github.com/go-ini/ini/section.go | 206 + .../vendor/github.com/go-ini/ini/struct.go | 431 + .../vendor/github.com/go-kit/kit/LICENSE | 22 + .../vendor/github.com/go-kit/kit/log/README.md | 147 + .../vendor/github.com/go-kit/kit/log/doc.go | 116 + .../github.com/go-kit/kit/log/json_logger.go | 92 + .../vendor/github.com/go-kit/kit/log/level/doc.go | 22 + .../github.com/go-kit/kit/log/level/level.go | 205 + .../vendor/github.com/go-kit/kit/log/log.go | 135 + .../github.com/go-kit/kit/log/logfmt_logger.go | 62 + .../vendor/github.com/go-kit/kit/log/nop_logger.go | 8 + .../vendor/github.com/go-kit/kit/log/stdlib.go | 116 + .../vendor/github.com/go-kit/kit/log/sync.go | 81 + .../vendor/github.com/go-kit/kit/log/value.go | 64 + .../vendor/github.com/go-logfmt/logfmt/LICENSE | 22 + .../vendor/github.com/go-logfmt/logfmt/README.md | 33 + 
.../vendor/github.com/go-logfmt/logfmt/decode.go | 237 + .../vendor/github.com/go-logfmt/logfmt/doc.go | 6 + .../vendor/github.com/go-logfmt/logfmt/encode.go | 321 + .../vendor/github.com/go-logfmt/logfmt/fuzz.go | 126 + .../github.com/go-logfmt/logfmt/jsonstring.go | 277 + .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 + .../github.com/go-openapi/jsonpointer/LICENSE | 202 + .../github.com/go-openapi/jsonpointer/pointer.go | 238 + .../go-openapi/jsonreference/CODE_OF_CONDUCT.md | 74 + .../github.com/go-openapi/jsonreference/LICENSE | 202 + .../go-openapi/jsonreference/reference.go | 156 + .../github.com/go-openapi/spec/CODE_OF_CONDUCT.md | 74 + .../vendor/github.com/go-openapi/spec/LICENSE | 202 + .../vendor/github.com/go-openapi/spec/bindata.go | 274 + .../github.com/go-openapi/spec/contact_info.go | 24 + .../vendor/github.com/go-openapi/spec/expander.go | 626 + .../github.com/go-openapi/spec/external_docs.go | 24 + .../vendor/github.com/go-openapi/spec/header.go | 165 + .../vendor/github.com/go-openapi/spec/info.go | 168 + .../vendor/github.com/go-openapi/spec/items.go | 199 + .../vendor/github.com/go-openapi/spec/license.go | 23 + .../vendor/github.com/go-openapi/spec/operation.go | 233 + .../vendor/github.com/go-openapi/spec/parameter.go | 299 + .../vendor/github.com/go-openapi/spec/path_item.go | 90 + .../vendor/github.com/go-openapi/spec/paths.go | 97 + .../vendor/github.com/go-openapi/spec/ref.go | 167 + .../vendor/github.com/go-openapi/spec/response.go | 113 + .../vendor/github.com/go-openapi/spec/responses.go | 122 + .../vendor/github.com/go-openapi/spec/schema.go | 628 + .../github.com/go-openapi/spec/security_scheme.go | 142 + .../vendor/github.com/go-openapi/spec/spec.go | 79 + .../vendor/github.com/go-openapi/spec/swagger.go | 317 + .../vendor/github.com/go-openapi/spec/tag.go | 73 + .../github.com/go-openapi/spec/xml_object.go | 68 + .../github.com/go-openapi/swag/CODE_OF_CONDUCT.md | 74 + .../vendor/github.com/go-openapi/swag/LICENSE | 202 + 
.../vendor/github.com/go-openapi/swag/convert.go | 188 + .../github.com/go-openapi/swag/convert_types.go | 595 + .../vendor/github.com/go-openapi/swag/json.go | 270 + .../vendor/github.com/go-openapi/swag/loading.go | 49 + .../vendor/github.com/go-openapi/swag/net.go | 24 + .../vendor/github.com/go-openapi/swag/path.go | 56 + .../vendor/github.com/go-openapi/swag/util.go | 318 + .../vendor/github.com/go-stack/stack/LICENSE.md | 21 + .../vendor/github.com/go-stack/stack/README.md | 38 + .../vendor/github.com/go-stack/stack/stack.go | 322 + .../vendor/github.com/gogo/protobuf/LICENSE | 36 + .../github.com/gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../github.com/gogo/protobuf/gogoproto/gogo.pb.go | 804 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../github.com/gogo/protobuf/gogoproto/gogo.proto | 133 + .../github.com/gogo/protobuf/gogoproto/helper.go | 357 + .../github.com/gogo/protobuf/jsonpb/jsonpb.go | 1228 + .../vendor/github.com/gogo/protobuf/proto/Makefile | 43 + .../vendor/github.com/gogo/protobuf/proto/clone.go | 234 + .../github.com/gogo/protobuf/proto/decode.go | 978 + .../github.com/gogo/protobuf/proto/decode_gogo.go | 172 + .../github.com/gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 203 + .../github.com/gogo/protobuf/proto/encode.go | 1362 + .../github.com/gogo/protobuf/proto/encode_gogo.go | 350 + .../vendor/github.com/gogo/protobuf/proto/equal.go | 300 + .../github.com/gogo/protobuf/proto/extensions.go | 693 + .../gogo/protobuf/proto/extensions_gogo.go | 294 + .../vendor/github.com/gogo/protobuf/proto/lib.go | 897 + .../github.com/gogo/protobuf/proto/lib_gogo.go | 42 + .../github.com/gogo/protobuf/proto/message_set.go | 311 + .../gogo/protobuf/proto/pointer_reflect.go | 484 + .../gogo/protobuf/proto/pointer_reflect_gogo.go | 85 + .../gogo/protobuf/proto/pointer_unsafe.go | 270 + .../gogo/protobuf/proto/pointer_unsafe_gogo.go | 128 + 
.../github.com/gogo/protobuf/proto/properties.go | 971 + .../gogo/protobuf/proto/properties_gogo.go | 111 + .../github.com/gogo/protobuf/proto/skip_gogo.go | 119 + .../vendor/github.com/gogo/protobuf/proto/text.go | 939 + .../github.com/gogo/protobuf/proto/text_gogo.go | 57 + .../github.com/gogo/protobuf/proto/text_parser.go | 1013 + .../github.com/gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 229 + .../protobuf/protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../protoc-gen-gogo/descriptor/descriptor.pb.go | 2280 + .../descriptor/descriptor_gostring.gen.go | 772 + .../protobuf/protoc-gen-gogo/descriptor/helper.go | 390 + .../github.com/gogo/protobuf/sortkeys/sortkeys.go | 101 + .../vendor/github.com/gogo/protobuf/types/Makefile | 39 + .../vendor/github.com/gogo/protobuf/types/any.go | 138 + .../github.com/gogo/protobuf/types/any.pb.go | 657 + .../vendor/github.com/gogo/protobuf/types/doc.go | 35 + .../github.com/gogo/protobuf/types/duration.go | 100 + .../github.com/gogo/protobuf/types/duration.pb.go | 494 + .../gogo/protobuf/types/duration_gogo.go | 100 + .../github.com/gogo/protobuf/types/empty.pb.go | 438 + .../gogo/protobuf/types/field_mask.pb.go | 719 + .../github.com/gogo/protobuf/types/struct.pb.go | 1867 + .../github.com/gogo/protobuf/types/timestamp.go | 132 + .../github.com/gogo/protobuf/types/timestamp.pb.go | 510 + .../gogo/protobuf/types/timestamp_gogo.go | 94 + .../github.com/gogo/protobuf/types/wrappers.pb.go | 2234 + .../vendor/github.com/golang/glog/LICENSE | 191 + .../vendor/github.com/golang/glog/README | 44 + .../vendor/github.com/golang/glog/glog.go | 1177 + .../vendor/github.com/golang/glog/glog_file.go | 124 + .../vendor/github.com/golang/protobuf/LICENSE | 31 + .../github.com/golang/protobuf/jsonpb/jsonpb.go | 843 + .../github.com/golang/protobuf/proto/Makefile | 43 + .../github.com/golang/protobuf/proto/clone.go | 229 + 
.../github.com/golang/protobuf/proto/decode.go | 970 + .../github.com/golang/protobuf/proto/encode.go | 1362 + .../github.com/golang/protobuf/proto/equal.go | 300 + .../github.com/golang/protobuf/proto/extensions.go | 587 + .../vendor/github.com/golang/protobuf/proto/lib.go | 897 + .../golang/protobuf/proto/message_set.go | 311 + .../golang/protobuf/proto/pointer_reflect.go | 484 + .../golang/protobuf/proto/pointer_unsafe.go | 270 + .../github.com/golang/protobuf/proto/properties.go | 872 + .../github.com/golang/protobuf/proto/text.go | 854 + .../golang/protobuf/proto/text_parser.go | 895 + .../protobuf/protoc-gen-go/descriptor/Makefile | 36 + .../protoc-gen-go/descriptor/descriptor.pb.go | 2152 + .../golang/protobuf/ptypes/any/any.pb.go | 155 + .../golang/protobuf/ptypes/any/any.proto | 140 + .../vendor/github.com/golang/snappy/AUTHORS | 15 + .../vendor/github.com/golang/snappy/CONTRIBUTORS | 37 + .../vendor/github.com/golang/snappy/LICENSE | 27 + .../vendor/github.com/golang/snappy/README | 107 + .../vendor/github.com/golang/snappy/decode.go | 237 + .../github.com/golang/snappy/decode_amd64.go | 14 + .../vendor/github.com/golang/snappy/decode_amd64.s | 490 + .../github.com/golang/snappy/decode_other.go | 101 + .../vendor/github.com/golang/snappy/encode.go | 285 + .../github.com/golang/snappy/encode_amd64.go | 29 + .../vendor/github.com/golang/snappy/encode_amd64.s | 730 + .../github.com/golang/snappy/encode_other.go | 238 + .../vendor/github.com/golang/snappy/snappy.go | 87 + .../github.com/google/gofuzz/CONTRIBUTING.md | 67 + .../vendor/github.com/google/gofuzz/LICENSE | 202 + .../vendor/github.com/google/gofuzz/doc.go | 18 + .../vendor/github.com/google/gofuzz/fuzz.go | 446 + .../gophercloud/gophercloud/CHANGELOG.md | 0 .../github.com/gophercloud/gophercloud/FAQ.md | 148 + .../github.com/gophercloud/gophercloud/LICENSE | 191 + .../gophercloud/gophercloud/MIGRATING.md | 32 + .../github.com/gophercloud/gophercloud/README.md | 143 + 
.../gophercloud/gophercloud/STYLEGUIDE.md | 74 + .../gophercloud/gophercloud/auth_options.go | 327 + .../github.com/gophercloud/gophercloud/doc.go | 69 + .../gophercloud/gophercloud/endpoint_search.go | 76 + .../github.com/gophercloud/gophercloud/errors.go | 408 + .../gophercloud/gophercloud/openstack/auth_env.go | 52 + .../gophercloud/gophercloud/openstack/client.go | 295 + .../compute/v2/extensions/floatingips/doc.go | 3 + .../compute/v2/extensions/floatingips/requests.go | 112 + .../compute/v2/extensions/floatingips/results.go | 117 + .../compute/v2/extensions/floatingips/urls.go | 37 + .../compute/v2/extensions/hypervisors/doc.go | 3 + .../compute/v2/extensions/hypervisors/requests.go | 13 + .../compute/v2/extensions/hypervisors/results.go | 161 + .../compute/v2/extensions/hypervisors/urls.go | 7 + .../openstack/compute/v2/flavors/doc.go | 7 + .../openstack/compute/v2/flavors/requests.go | 163 + .../openstack/compute/v2/flavors/results.go | 115 + .../openstack/compute/v2/flavors/urls.go | 17 + .../gophercloud/openstack/compute/v2/images/doc.go | 7 + .../openstack/compute/v2/images/requests.go | 102 + .../openstack/compute/v2/images/results.go | 83 + .../openstack/compute/v2/images/urls.go | 15 + .../openstack/compute/v2/servers/doc.go | 6 + .../openstack/compute/v2/servers/errors.go | 71 + .../openstack/compute/v2/servers/requests.go | 741 + .../openstack/compute/v2/servers/results.go | 350 + .../openstack/compute/v2/servers/urls.go | 51 + .../openstack/compute/v2/servers/util.go | 20 + .../gophercloud/openstack/endpoint_location.go | 99 + .../gophercloud/gophercloud/openstack/errors.go | 71 + .../openstack/identity/v2/tenants/doc.go | 7 + .../openstack/identity/v2/tenants/requests.go | 29 + .../openstack/identity/v2/tenants/results.go | 53 + .../openstack/identity/v2/tenants/urls.go | 7 + .../openstack/identity/v2/tokens/doc.go | 5 + .../openstack/identity/v2/tokens/requests.go | 99 + .../openstack/identity/v2/tokens/results.go | 144 + 
.../openstack/identity/v2/tokens/urls.go | 13 + .../openstack/identity/v3/tokens/doc.go | 6 + .../openstack/identity/v3/tokens/requests.go | 200 + .../openstack/identity/v3/tokens/results.go | 103 + .../openstack/identity/v3/tokens/urls.go | 7 + .../gophercloud/openstack/utils/choose_version.go | 114 + .../gophercloud/gophercloud/pagination/http.go | 60 + .../gophercloud/gophercloud/pagination/linked.go | 92 + .../gophercloud/gophercloud/pagination/marker.go | 58 + .../gophercloud/gophercloud/pagination/pager.go | 238 + .../gophercloud/gophercloud/pagination/pkg.go | 4 + .../gophercloud/gophercloud/pagination/single.go | 33 + .../github.com/gophercloud/gophercloud/params.go | 445 + .../gophercloud/gophercloud/provider_client.go | 307 + .../github.com/gophercloud/gophercloud/results.go | 336 + .../gophercloud/gophercloud/service_client.go | 122 + .../github.com/gophercloud/gophercloud/util.go | 102 + .../grpc-ecosystem/grpc-gateway/LICENSE.txt | 27 + .../grpc-ecosystem/grpc-gateway/runtime/context.go | 187 + .../grpc-ecosystem/grpc-gateway/runtime/convert.go | 58 + .../grpc-ecosystem/grpc-gateway/runtime/doc.go | 5 + .../grpc-ecosystem/grpc-gateway/runtime/errors.go | 127 + .../grpc-ecosystem/grpc-gateway/runtime/handler.go | 187 + .../runtime/internal/stream_chunk.pb.go | 92 + .../runtime/internal/stream_chunk.proto | 12 + .../grpc-gateway/runtime/marshal_json.go | 42 + .../grpc-gateway/runtime/marshal_jsonpb.go | 189 + .../grpc-gateway/runtime/marshal_proto.go | 62 + .../grpc-gateway/runtime/marshaler.go | 48 + .../grpc-gateway/runtime/marshaler_registry.go | 91 + .../grpc-ecosystem/grpc-gateway/runtime/mux.go | 260 + .../grpc-ecosystem/grpc-gateway/runtime/pattern.go | 227 + .../grpc-gateway/runtime/proto2_convert.go | 80 + .../grpc-gateway/runtime/proto_errors.go | 61 + .../grpc-ecosystem/grpc-gateway/runtime/query.go | 279 + .../grpc-ecosystem/grpc-gateway/utilities/doc.go | 2 + .../grpc-gateway/utilities/pattern.go | 22 + 
.../grpc-ecosystem/grpc-gateway/utilities/trie.go | 177 + .../vendor/github.com/hashicorp/consul/LICENSE | 354 + .../github.com/hashicorp/consul/api/README.md | 43 + .../vendor/github.com/hashicorp/consul/api/acl.go | 140 + .../github.com/hashicorp/consul/api/agent.go | 471 + .../vendor/github.com/hashicorp/consul/api/api.go | 623 + .../github.com/hashicorp/consul/api/catalog.go | 191 + .../github.com/hashicorp/consul/api/coordinate.go | 66 + .../github.com/hashicorp/consul/api/event.go | 104 + .../github.com/hashicorp/consul/api/health.go | 199 + .../vendor/github.com/hashicorp/consul/api/kv.go | 419 + .../vendor/github.com/hashicorp/consul/api/lock.go | 384 + .../github.com/hashicorp/consul/api/operator.go | 163 + .../hashicorp/consul/api/prepared_query.go | 193 + .../vendor/github.com/hashicorp/consul/api/raw.go | 24 + .../github.com/hashicorp/consul/api/semaphore.go | 512 + .../github.com/hashicorp/consul/api/session.go | 217 + .../github.com/hashicorp/consul/api/snapshot.go | 47 + .../github.com/hashicorp/consul/api/status.go | 43 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 + .../github.com/hashicorp/go-cleanhttp/README.md | 30 + .../github.com/hashicorp/go-cleanhttp/cleanhttp.go | 53 + .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../vendor/github.com/hashicorp/serf/LICENSE | 354 + .../github.com/hashicorp/serf/coordinate/client.go | 180 + .../github.com/hashicorp/serf/coordinate/config.go | 70 + .../hashicorp/serf/coordinate/coordinate.go | 183 + .../hashicorp/serf/coordinate/phantom.go | 187 + .../vendor/github.com/influxdata/influxdb/LICENSE | 20 + .../influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md | 25 + .../influxdata/influxdb/client/v2/client.go | 609 + .../influxdata/influxdb/client/v2/udp.go | 112 + .../influxdata/influxdb/models/consistency.go | 48 + .../influxdata/influxdb/models/inline_fnv.go | 32 + .../influxdb/models/inline_strconv_parse.go | 38 + .../influxdata/influxdb/models/points.go | 2035 + 
.../github.com/influxdata/influxdb/models/rows.go | 62 + .../influxdata/influxdb/models/statistic.go | 42 + .../github.com/influxdata/influxdb/models/time.go | 74 + .../influxdata/influxdb/pkg/escape/bytes.go | 111 + .../influxdata/influxdb/pkg/escape/strings.go | 21 + .../vendor/github.com/influxdb/influxdb/LICENSE | 20 + .../vendor/github.com/jmespath/go-jmespath/LICENSE | 13 + .../github.com/jmespath/go-jmespath/Makefile | 44 + .../github.com/jmespath/go-jmespath/README.md | 7 + .../vendor/github.com/jmespath/go-jmespath/api.go | 49 + .../jmespath/go-jmespath/astnodetype_string.go | 16 + .../github.com/jmespath/go-jmespath/functions.go | 842 + .../github.com/jmespath/go-jmespath/interpreter.go | 418 + .../github.com/jmespath/go-jmespath/lexer.go | 420 + .../github.com/jmespath/go-jmespath/parser.go | 603 + .../jmespath/go-jmespath/toktype_string.go | 16 + .../vendor/github.com/jmespath/go-jmespath/util.go | 185 + .../vendor/github.com/json-iterator/go/Gopkg.lock | 33 + .../vendor/github.com/json-iterator/go/Gopkg.toml | 33 + .../vendor/github.com/json-iterator/go/LICENSE | 21 + .../vendor/github.com/json-iterator/go/README.md | 88 + .../vendor/github.com/json-iterator/go/build.sh | 12 + .../github.com/json-iterator/go/feature_adapter.go | 150 + .../github.com/json-iterator/go/feature_any.go | 245 + .../json-iterator/go/feature_any_array.go | 278 + .../json-iterator/go/feature_any_bool.go | 137 + .../json-iterator/go/feature_any_float.go | 83 + .../json-iterator/go/feature_any_int32.go | 74 + .../json-iterator/go/feature_any_int64.go | 74 + .../json-iterator/go/feature_any_invalid.go | 82 + .../github.com/json-iterator/go/feature_any_nil.go | 69 + .../json-iterator/go/feature_any_number.go | 123 + .../json-iterator/go/feature_any_object.go | 374 + .../json-iterator/go/feature_any_string.go | 166 + .../json-iterator/go/feature_any_uint32.go | 74 + .../json-iterator/go/feature_any_uint64.go | 74 + .../github.com/json-iterator/go/feature_config.go | 307 + 
.../go/feature_config_with_sync_map.go | 65 + .../go/feature_config_without_sync_map.go | 71 + .../github.com/json-iterator/go/feature_iter.go | 322 + .../json-iterator/go/feature_iter_array.go | 58 + .../json-iterator/go/feature_iter_float.go | 347 + .../json-iterator/go/feature_iter_int.go | 339 + .../json-iterator/go/feature_iter_object.go | 248 + .../json-iterator/go/feature_iter_skip.go | 129 + .../json-iterator/go/feature_iter_skip_sloppy.go | 144 + .../json-iterator/go/feature_iter_skip_strict.go | 89 + .../json-iterator/go/feature_iter_string.go | 215 + .../json-iterator/go/feature_json_number.go | 31 + .../github.com/json-iterator/go/feature_pool.go | 59 + .../github.com/json-iterator/go/feature_reflect.go | 607 + .../json-iterator/go/feature_reflect_array.go | 110 + .../json-iterator/go/feature_reflect_extension.go | 424 + .../json-iterator/go/feature_reflect_map.go | 260 + .../json-iterator/go/feature_reflect_native.go | 789 + .../json-iterator/go/feature_reflect_object.go | 194 + .../json-iterator/go/feature_reflect_optional.go | 124 + .../json-iterator/go/feature_reflect_slice.go | 143 + .../go/feature_reflect_struct_decoder.go | 974 + .../github.com/json-iterator/go/feature_stream.go | 206 + .../json-iterator/go/feature_stream_float.go | 94 + .../json-iterator/go/feature_stream_int.go | 190 + .../json-iterator/go/feature_stream_string.go | 372 + .../json-iterator/go/fuzzy_mode_convert_table.md | 7 + .../vendor/github.com/json-iterator/go/jsoniter.go | 18 + .../vendor/github.com/json-iterator/go/test.sh | 12 + .../vendor/github.com/juju/ratelimit/LICENSE | 191 + .../vendor/github.com/juju/ratelimit/ratelimit.go | 245 + .../vendor/github.com/juju/ratelimit/reader.go | 51 + .../github.com/julienschmidt/httprouter/LICENSE | 24 + .../github.com/julienschmidt/httprouter/README.md | 323 + .../github.com/julienschmidt/httprouter/path.go | 123 + .../github.com/julienschmidt/httprouter/router.go | 363 + .../github.com/julienschmidt/httprouter/tree.go | 555 + 
src/prometheus/vendor/github.com/kr/logfmt/Readme | 12 + .../vendor/github.com/kr/logfmt/decode.go | 184 + .../vendor/github.com/kr/logfmt/scanner.go | 149 + .../vendor/github.com/kr/logfmt/unquote.go | 149 + .../vendor/github.com/mailru/easyjson/LICENSE | 7 + .../github.com/mailru/easyjson/buffer/pool.go | 207 + .../github.com/mailru/easyjson/jlexer/error.go | 15 + .../github.com/mailru/easyjson/jlexer/lexer.go | 956 + .../github.com/mailru/easyjson/jwriter/writer.go | 273 + .../matttproud/golang_protobuf_extensions/LICENSE | 201 + .../matttproud/golang_protobuf_extensions/NOTICE | 1 + .../golang_protobuf_extensions/pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../golang_protobuf_extensions/pbutil/encode.go | 46 + src/prometheus/vendor/github.com/miekg/dns/AUTHORS | 1 + .../vendor/github.com/miekg/dns/CONTRIBUTORS | 10 + .../vendor/github.com/miekg/dns/COPYRIGHT | 9 + .../vendor/github.com/miekg/dns/Gopkg.lock | 21 + .../vendor/github.com/miekg/dns/Gopkg.toml | 26 + src/prometheus/vendor/github.com/miekg/dns/LICENSE | 32 + .../vendor/github.com/miekg/dns/Makefile.fuzz | 33 + .../vendor/github.com/miekg/dns/Makefile.release | 52 + .../vendor/github.com/miekg/dns/README.md | 168 + .../vendor/github.com/miekg/dns/client.go | 506 + .../vendor/github.com/miekg/dns/clientconfig.go | 139 + .../github.com/miekg/dns/compress_generate.go | 188 + src/prometheus/vendor/github.com/miekg/dns/dane.go | 43 + .../vendor/github.com/miekg/dns/defaults.go | 288 + src/prometheus/vendor/github.com/miekg/dns/dns.go | 107 + .../vendor/github.com/miekg/dns/dnssec.go | 784 + .../vendor/github.com/miekg/dns/dnssec_keygen.go | 178 + .../vendor/github.com/miekg/dns/dnssec_keyscan.go | 297 + .../vendor/github.com/miekg/dns/dnssec_privkey.go | 93 + src/prometheus/vendor/github.com/miekg/dns/doc.go | 272 + src/prometheus/vendor/github.com/miekg/dns/edns.go | 627 + .../vendor/github.com/miekg/dns/format.go | 87 + src/prometheus/vendor/github.com/miekg/dns/fuzz.go | 
23 + .../vendor/github.com/miekg/dns/generate.go | 159 + .../vendor/github.com/miekg/dns/labels.go | 191 + src/prometheus/vendor/github.com/miekg/dns/msg.go | 1154 + .../vendor/github.com/miekg/dns/msg_generate.go | 348 + .../vendor/github.com/miekg/dns/msg_helpers.go | 637 + .../vendor/github.com/miekg/dns/nsecx.go | 106 + .../vendor/github.com/miekg/dns/privaterr.go | 149 + .../vendor/github.com/miekg/dns/rawmsg.go | 49 + .../vendor/github.com/miekg/dns/reverse.go | 38 + .../vendor/github.com/miekg/dns/sanitize.go | 84 + src/prometheus/vendor/github.com/miekg/dns/scan.go | 1007 + .../vendor/github.com/miekg/dns/scan_rr.go | 2199 + .../vendor/github.com/miekg/dns/scanner.go | 56 + .../vendor/github.com/miekg/dns/server.go | 719 + src/prometheus/vendor/github.com/miekg/dns/sig0.go | 218 + .../vendor/github.com/miekg/dns/singleinflight.go | 57 + .../vendor/github.com/miekg/dns/smimea.go | 47 + src/prometheus/vendor/github.com/miekg/dns/tlsa.go | 47 + src/prometheus/vendor/github.com/miekg/dns/tsig.go | 386 + .../vendor/github.com/miekg/dns/types.go | 1381 + .../vendor/github.com/miekg/dns/types_generate.go | 272 + src/prometheus/vendor/github.com/miekg/dns/udp.go | 89 + .../vendor/github.com/miekg/dns/udp_windows.go | 37 + .../vendor/github.com/miekg/dns/update.go | 106 + .../vendor/github.com/miekg/dns/version.go | 15 + src/prometheus/vendor/github.com/miekg/dns/xfr.go | 260 + .../vendor/github.com/miekg/dns/zcompress.go | 118 + src/prometheus/vendor/github.com/miekg/dns/zmsg.go | 3615 + .../vendor/github.com/miekg/dns/ztypes.go | 863 + .../vendor/github.com/mwitkow/go-conntrack/LICENSE | 201 + .../github.com/mwitkow/go-conntrack/README.md | 88 + .../mwitkow/go-conntrack/dialer_reporter.go | 108 + .../mwitkow/go-conntrack/dialer_wrapper.go | 166 + .../mwitkow/go-conntrack/listener_reporter.go | 43 + .../mwitkow/go-conntrack/listener_wrapper.go | 137 + .../vendor/github.com/oklog/oklog/LICENSE | 201 + .../github.com/oklog/oklog/pkg/group/group.go | 62 + 
.../vendor/github.com/oklog/ulid/AUTHORS.md | 2 + .../vendor/github.com/oklog/ulid/CHANGELOG.md | 12 + .../vendor/github.com/oklog/ulid/CONTRIBUTING.md | 17 + .../vendor/github.com/oklog/ulid/LICENSE | 201 + .../vendor/github.com/oklog/ulid/README.md | 147 + .../vendor/github.com/oklog/ulid/ulid.go | 332 + .../opentracing-contrib/go-stdlib/LICENSE | 27 + .../go-stdlib/nethttp/client.go | 250 + .../opentracing-contrib/go-stdlib/nethttp/doc.go | 3 + .../go-stdlib/nethttp/server.go | 80 + .../opentracing/opentracing-go/CHANGELOG.md | 14 + .../github.com/opentracing/opentracing-go/LICENSE | 21 + .../github.com/opentracing/opentracing-go/Makefile | 32 + .../opentracing/opentracing-go/README.md | 147 + .../opentracing/opentracing-go/ext/tags.go | 158 + .../opentracing/opentracing-go/globaltracer.go | 32 + .../opentracing/opentracing-go/gocontext.go | 57 + .../opentracing/opentracing-go/log/field.go | 245 + .../opentracing/opentracing-go/log/util.go | 54 + .../github.com/opentracing/opentracing-go/noop.go | 64 + .../opentracing/opentracing-go/propagation.go | 176 + .../github.com/opentracing/opentracing-go/span.go | 185 + .../opentracing/opentracing-go/tracer.go | 305 + .../vendor/github.com/petermattis/goid/LICENSE | 202 + .../vendor/github.com/petermattis/goid/README.md | 4 + .../vendor/github.com/petermattis/goid/goid.go | 35 + .../github.com/petermattis/goid/goid_go1.3.c | 23 + .../github.com/petermattis/goid/goid_go1.3.go | 21 + .../github.com/petermattis/goid/goid_go1.4.go | 34 + .../github.com/petermattis/goid/goid_go1.4.s | 18 + .../github.com/petermattis/goid/goid_go1.5.go | 67 + .../github.com/petermattis/goid/goid_go1.5plus.s | 27 + .../petermattis/goid/goid_go1.5plus_arm.s | 27 + .../github.com/petermattis/goid/goid_go1.6plus.go | 53 + .../github.com/petermattis/goid/goid_go1.9plus.go | 47 + .../github.com/petermattis/goid/goid_slow.go | 23 + .../vendor/github.com/pkg/errors/LICENSE | 23 + .../vendor/github.com/pkg/errors/README.md | 52 + 
.../vendor/github.com/pkg/errors/appveyor.yml | 32 + .../vendor/github.com/pkg/errors/errors.go | 269 + .../vendor/github.com/pkg/errors/stack.go | 178 + .../vendor/github.com/pmezard/go-difflib/LICENSE | 27 + .../pmezard/go-difflib/difflib/difflib.go | 758 + .../github.com/prometheus/client_golang/LICENSE | 201 + .../github.com/prometheus/client_golang/NOTICE | 23 + .../prometheus/client_golang/api/client.go | 131 + .../client_golang/api/prometheus/v1/api.go | 497 + .../prometheus/client_golang/prometheus/README.md | 1 + .../client_golang/prometheus/collector.go | 75 + .../prometheus/client_golang/prometheus/counter.go | 277 + .../prometheus/client_golang/prometheus/desc.go | 188 + .../prometheus/client_golang/prometheus/doc.go | 191 + .../client_golang/prometheus/expvar_collector.go | 119 + .../prometheus/client_golang/prometheus/fnv.go | 29 + .../prometheus/client_golang/prometheus/gauge.go | 286 + .../client_golang/prometheus/go_collector.go | 284 + .../client_golang/prometheus/histogram.go | 505 + .../prometheus/client_golang/prometheus/http.go | 523 + .../prometheus/client_golang/prometheus/labels.go | 57 + .../prometheus/client_golang/prometheus/metric.go | 158 + .../client_golang/prometheus/observer.go | 52 + .../client_golang/prometheus/process_collector.go | 140 + .../client_golang/prometheus/promhttp/delegator.go | 199 + .../prometheus/promhttp/delegator_1_8.go | 181 + .../prometheus/promhttp/delegator_pre_1_8.go | 44 + .../client_golang/prometheus/promhttp/http.go | 311 + .../prometheus/promhttp/instrument_client.go | 97 + .../prometheus/promhttp/instrument_client_1_8.go | 144 + .../prometheus/promhttp/instrument_server.go | 447 + .../client_golang/prometheus/registry.go | 807 + .../prometheus/client_golang/prometheus/summary.go | 609 + .../prometheus/client_golang/prometheus/timer.go | 51 + .../prometheus/client_golang/prometheus/untyped.go | 42 + .../prometheus/client_golang/prometheus/value.go | 160 + .../prometheus/client_golang/prometheus/vec.go | 
469 + .../github.com/prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 364 + .../vendor/github.com/prometheus/common/LICENSE | 201 + .../vendor/github.com/prometheus/common/NOTICE | 5 + .../github.com/prometheus/common/config/config.go | 34 + .../prometheus/common/config/http_config.go | 317 + .../github.com/prometheus/common/expfmt/decode.go | 429 + .../github.com/prometheus/common/expfmt/encode.go | 88 + .../github.com/prometheus/common/expfmt/expfmt.go | 38 + .../github.com/prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 303 + .../prometheus/common/expfmt/text_parse.go | 757 + .../internal/bitbucket.org/ww/goautoneg/README.txt | 67 + .../internal/bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../github.com/prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../github.com/prometheus/common/model/labels.go | 210 + .../github.com/prometheus/common/model/labelset.go | 169 + .../github.com/prometheus/common/model/metric.go | 103 + .../github.com/prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../github.com/prometheus/common/model/silence.go | 106 + .../github.com/prometheus/common/model/time.go | 264 + .../github.com/prometheus/common/model/value.go | 416 + .../prometheus/common/promlog/flag/flag.go | 33 + .../github.com/prometheus/common/promlog/log.go | 63 + .../github.com/prometheus/common/route/route.go | 110 + .../github.com/prometheus/common/version/info.go | 89 + .../vendor/github.com/prometheus/procfs/AUTHORS.md | 20 + .../github.com/prometheus/procfs/CONTRIBUTING.md | 18 + .../vendor/github.com/prometheus/procfs/LICENSE | 201 + .../vendor/github.com/prometheus/procfs/Makefile | 6 + .../vendor/github.com/prometheus/procfs/NOTICE | 7 + .../vendor/github.com/prometheus/procfs/README.md | 10 + 
.../vendor/github.com/prometheus/procfs/doc.go | 45 + .../vendor/github.com/prometheus/procfs/fs.go | 33 + .../vendor/github.com/prometheus/procfs/ipvs.go | 224 + .../vendor/github.com/prometheus/procfs/mdstat.go | 138 + .../vendor/github.com/prometheus/procfs/proc.go | 212 + .../vendor/github.com/prometheus/procfs/proc_io.go | 55 + .../github.com/prometheus/procfs/proc_limits.go | 137 + .../github.com/prometheus/procfs/proc_stat.go | 175 + .../vendor/github.com/prometheus/procfs/stat.go | 56 + .../vendor/github.com/prometheus/tsdb/LICENSE | 201 + .../vendor/github.com/prometheus/tsdb/README.md | 12 + .../vendor/github.com/prometheus/tsdb/block.go | 542 + .../github.com/prometheus/tsdb/chunkenc/bstream.go | 210 + .../github.com/prometheus/tsdb/chunkenc/chunk.go | 131 + .../github.com/prometheus/tsdb/chunkenc/xor.go | 386 + .../github.com/prometheus/tsdb/chunks/chunks.go | 408 + .../vendor/github.com/prometheus/tsdb/compact.go | 836 + .../vendor/github.com/prometheus/tsdb/db.go | 1001 + .../github.com/prometheus/tsdb/encoding_helpers.go | 183 + .../prometheus/tsdb/fileutil/dir_unix.go | 22 + .../prometheus/tsdb/fileutil/dir_windows.go | 46 + .../prometheus/tsdb/fileutil/fileutil.go | 25 + .../github.com/prometheus/tsdb/fileutil/flock.go | 41 + .../prometheus/tsdb/fileutil/flock_plan9.go | 32 + .../prometheus/tsdb/fileutil/flock_solaris.go | 59 + .../prometheus/tsdb/fileutil/flock_unix.go | 54 + .../prometheus/tsdb/fileutil/flock_windows.go | 36 + .../github.com/prometheus/tsdb/fileutil/mmap.go | 48 + .../prometheus/tsdb/fileutil/mmap_386.go | 5 + .../prometheus/tsdb/fileutil/mmap_amd64.go | 5 + .../prometheus/tsdb/fileutil/mmap_unix.go | 30 + .../prometheus/tsdb/fileutil/mmap_windows.go | 46 + .../prometheus/tsdb/fileutil/preallocate.go | 54 + .../prometheus/tsdb/fileutil/preallocate_darwin.go | 41 + .../prometheus/tsdb/fileutil/preallocate_linux.go | 47 + .../prometheus/tsdb/fileutil/preallocate_other.go | 25 + .../github.com/prometheus/tsdb/fileutil/sync.go | 29 + 
.../prometheus/tsdb/fileutil/sync_darwin.go | 40 + .../prometheus/tsdb/fileutil/sync_linux.go | 34 + .../vendor/github.com/prometheus/tsdb/head.go | 1340 + .../prometheus/tsdb/index/encoding_helpers.go | 179 + .../github.com/prometheus/tsdb/index/index.go | 1099 + .../github.com/prometheus/tsdb/index/postings.go | 592 + .../github.com/prometheus/tsdb/labels/labels.go | 212 + .../github.com/prometheus/tsdb/labels/selector.go | 120 + .../vendor/github.com/prometheus/tsdb/querier.go | 925 + .../vendor/github.com/prometheus/tsdb/repair.go | 104 + .../github.com/prometheus/tsdb/tombstones.go | 256 + .../vendor/github.com/prometheus/tsdb/wal.go | 1208 + .../vendor/github.com/samuel/go-zookeeper/LICENSE | 25 + .../github.com/samuel/go-zookeeper/zk/conn.go | 1032 + .../github.com/samuel/go-zookeeper/zk/constants.go | 240 + .../samuel/go-zookeeper/zk/dnshostprovider.go | 88 + .../github.com/samuel/go-zookeeper/zk/flw.go | 266 + .../github.com/samuel/go-zookeeper/zk/lock.go | 150 + .../samuel/go-zookeeper/zk/server_help.go | 216 + .../samuel/go-zookeeper/zk/server_java.go | 136 + .../github.com/samuel/go-zookeeper/zk/structs.go | 609 + .../github.com/samuel/go-zookeeper/zk/util.go | 54 + .../vendor/github.com/sasha-s/go-deadlock/LICENSE | 201 + .../github.com/sasha-s/go-deadlock/Readme.md | 112 + .../github.com/sasha-s/go-deadlock/deadlock.go | 297 + .../github.com/sasha-s/go-deadlock/stacktraces.go | 107 + .../vendor/github.com/spf13/pflag/LICENSE | 28 + .../vendor/github.com/spf13/pflag/README.md | 296 + .../vendor/github.com/spf13/pflag/bool.go | 94 + .../vendor/github.com/spf13/pflag/bool_slice.go | 147 + .../vendor/github.com/spf13/pflag/count.go | 96 + .../vendor/github.com/spf13/pflag/duration.go | 86 + .../vendor/github.com/spf13/pflag/flag.go | 1128 + .../vendor/github.com/spf13/pflag/float32.go | 88 + .../vendor/github.com/spf13/pflag/float64.go | 84 + .../vendor/github.com/spf13/pflag/golangflag.go | 101 + .../vendor/github.com/spf13/pflag/int.go | 84 + 
.../vendor/github.com/spf13/pflag/int32.go | 88 + .../vendor/github.com/spf13/pflag/int64.go | 84 + .../vendor/github.com/spf13/pflag/int8.go | 88 + .../vendor/github.com/spf13/pflag/int_slice.go | 128 + src/prometheus/vendor/github.com/spf13/pflag/ip.go | 94 + .../vendor/github.com/spf13/pflag/ip_slice.go | 148 + .../vendor/github.com/spf13/pflag/ipmask.go | 122 + .../vendor/github.com/spf13/pflag/ipnet.go | 98 + .../vendor/github.com/spf13/pflag/string.go | 80 + .../vendor/github.com/spf13/pflag/string_array.go | 103 + .../vendor/github.com/spf13/pflag/string_slice.go | 129 + .../vendor/github.com/spf13/pflag/uint.go | 88 + .../vendor/github.com/spf13/pflag/uint16.go | 88 + .../vendor/github.com/spf13/pflag/uint32.go | 88 + .../vendor/github.com/spf13/pflag/uint64.go | 88 + .../vendor/github.com/spf13/pflag/uint8.go | 88 + .../vendor/github.com/spf13/pflag/uint_slice.go | 126 + .../vendor/github.com/stretchr/testify/LICENSE | 22 + .../stretchr/testify/assert/assertion_forward.go | 387 + .../testify/assert/assertion_forward.go.tmpl | 4 + .../stretchr/testify/assert/assertions.go | 1007 + .../github.com/stretchr/testify/assert/doc.go | 45 + .../github.com/stretchr/testify/assert/errors.go | 10 + .../stretchr/testify/assert/forward_assertions.go | 16 + .../stretchr/testify/assert/http_assertions.go | 106 + .../github.com/stretchr/testify/require/doc.go | 28 + .../testify/require/forward_requirements.go | 16 + .../github.com/stretchr/testify/require/require.go | 464 + .../stretchr/testify/require/require.go.tmpl | 6 + .../stretchr/testify/require/require_forward.go | 388 + .../testify/require/require_forward.go.tmpl | 4 + .../stretchr/testify/require/requirements.go | 9 + .../github.com/stretchr/testify/suite/doc.go | 65 + .../stretchr/testify/suite/interfaces.go | 46 + .../github.com/stretchr/testify/suite/suite.go | 121 + src/prometheus/vendor/github.com/ugorji/go/LICENSE | 22 + .../vendor/github.com/ugorji/go/codec/0doc.go | 193 + 
.../vendor/github.com/ugorji/go/codec/binc.go | 918 + .../vendor/github.com/ugorji/go/codec/cbor.go | 584 + .../vendor/github.com/ugorji/go/codec/decode.go | 2015 + .../vendor/github.com/ugorji/go/codec/encode.go | 1405 + .../ugorji/go/codec/fast-path.generated.go | 38900 +++++++++ .../github.com/ugorji/go/codec/fast-path.go.tmpl | 511 + .../github.com/ugorji/go/codec/fast-path.not.go | 32 + .../ugorji/go/codec/gen-dec-array.go.tmpl | 101 + .../github.com/ugorji/go/codec/gen-dec-map.go.tmpl | 58 + .../ugorji/go/codec/gen-helper.generated.go | 233 + .../github.com/ugorji/go/codec/gen-helper.go.tmpl | 364 + .../github.com/ugorji/go/codec/gen.generated.go | 171 + .../vendor/github.com/ugorji/go/codec/gen.go | 1920 + .../vendor/github.com/ugorji/go/codec/helper.go | 1129 + .../github.com/ugorji/go/codec/helper_internal.go | 242 + .../ugorji/go/codec/helper_not_unsafe.go | 20 + .../github.com/ugorji/go/codec/helper_unsafe.go | 45 + .../vendor/github.com/ugorji/go/codec/json.go | 1072 + .../vendor/github.com/ugorji/go/codec/msgpack.go | 844 + .../vendor/github.com/ugorji/go/codec/noop.go | 213 + .../vendor/github.com/ugorji/go/codec/prebuild.go | 3 + .../vendor/github.com/ugorji/go/codec/rpc.go | 180 + .../vendor/github.com/ugorji/go/codec/simple.go | 518 + .../vendor/github.com/ugorji/go/codec/test.py | 120 + .../vendor/github.com/ugorji/go/codec/time.go | 222 + src/prometheus/vendor/golang.org/x/crypto/LICENSE | 27 + src/prometheus/vendor/golang.org/x/crypto/PATENTS | 22 + .../vendor/golang.org/x/crypto/ed25519/ed25519.go | 181 + .../crypto/ed25519/internal/edwards25519/const.go | 1422 + .../ed25519/internal/edwards25519/edwards25519.go | 1771 + src/prometheus/vendor/golang.org/x/net/LICENSE | 27 + src/prometheus/vendor/golang.org/x/net/PATENTS | 22 + src/prometheus/vendor/golang.org/x/net/bpf/asm.go | 41 + .../vendor/golang.org/x/net/bpf/constants.go | 218 + src/prometheus/vendor/golang.org/x/net/bpf/doc.go | 82 + .../vendor/golang.org/x/net/bpf/instructions.go | 704 
+ .../vendor/golang.org/x/net/bpf/setter.go | 10 + src/prometheus/vendor/golang.org/x/net/bpf/vm.go | 140 + .../vendor/golang.org/x/net/bpf/vm_instructions.go | 174 + .../vendor/golang.org/x/net/context/context.go | 56 + .../golang.org/x/net/context/ctxhttp/ctxhttp.go | 74 + .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 + .../vendor/golang.org/x/net/context/go17.go | 72 + .../vendor/golang.org/x/net/context/go19.go | 20 + .../vendor/golang.org/x/net/context/pre_go17.go | 300 + .../vendor/golang.org/x/net/context/pre_go19.go | 109 + .../vendor/golang.org/x/net/http2/Dockerfile | 51 + .../vendor/golang.org/x/net/http2/Makefile | 3 + .../vendor/golang.org/x/net/http2/README | 20 + .../vendor/golang.org/x/net/http2/ciphers.go | 641 + .../golang.org/x/net/http2/client_conn_pool.go | 256 + .../golang.org/x/net/http2/configure_transport.go | 80 + .../vendor/golang.org/x/net/http2/databuffer.go | 146 + .../vendor/golang.org/x/net/http2/errors.go | 133 + .../vendor/golang.org/x/net/http2/flow.go | 50 + .../vendor/golang.org/x/net/http2/frame.go | 1579 + .../vendor/golang.org/x/net/http2/go16.go | 16 + .../vendor/golang.org/x/net/http2/go17.go | 106 + .../vendor/golang.org/x/net/http2/go17_not18.go | 36 + .../vendor/golang.org/x/net/http2/go18.go | 56 + .../vendor/golang.org/x/net/http2/go19.go | 16 + .../vendor/golang.org/x/net/http2/gotrack.go | 170 + .../vendor/golang.org/x/net/http2/headermap.go | 78 + .../vendor/golang.org/x/net/http2/hpack/encode.go | 240 + .../vendor/golang.org/x/net/http2/hpack/hpack.go | 490 + .../vendor/golang.org/x/net/http2/hpack/huffman.go | 212 + .../vendor/golang.org/x/net/http2/hpack/tables.go | 479 + .../vendor/golang.org/x/net/http2/http2.go | 391 + .../vendor/golang.org/x/net/http2/not_go16.go | 21 + .../vendor/golang.org/x/net/http2/not_go17.go | 87 + .../vendor/golang.org/x/net/http2/not_go18.go | 29 + .../vendor/golang.org/x/net/http2/not_go19.go | 16 + .../vendor/golang.org/x/net/http2/pipe.go | 163 + 
.../vendor/golang.org/x/net/http2/server.go | 2888 + .../vendor/golang.org/x/net/http2/transport.go | 2303 + .../vendor/golang.org/x/net/http2/write.go | 365 + .../vendor/golang.org/x/net/http2/writesched.go | 242 + .../golang.org/x/net/http2/writesched_priority.go | 452 + .../golang.org/x/net/http2/writesched_random.go | 72 + .../vendor/golang.org/x/net/idna/idna.go | 732 + .../vendor/golang.org/x/net/idna/punycode.go | 203 + .../vendor/golang.org/x/net/idna/tables.go | 4557 + .../vendor/golang.org/x/net/idna/trie.go | 72 + .../vendor/golang.org/x/net/idna/trieval.go | 119 + .../vendor/golang.org/x/net/internal/iana/const.go | 227 + .../golang.org/x/net/internal/socket/cmsghdr.go | 11 + .../x/net/internal/socket/cmsghdr_bsd.go | 13 + .../x/net/internal/socket/cmsghdr_linux_32bit.go | 14 + .../x/net/internal/socket/cmsghdr_linux_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_solaris_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_stub.go | 17 + .../golang.org/x/net/internal/socket/error_unix.go | 31 + .../x/net/internal/socket/error_windows.go | 26 + .../x/net/internal/socket/iovec_32bit.go | 19 + .../x/net/internal/socket/iovec_64bit.go | 19 + .../x/net/internal/socket/iovec_solaris_64bit.go | 19 + .../golang.org/x/net/internal/socket/iovec_stub.go | 11 + .../x/net/internal/socket/mmsghdr_stub.go | 21 + .../x/net/internal/socket/mmsghdr_unix.go | 42 + .../golang.org/x/net/internal/socket/msghdr_bsd.go | 39 + .../x/net/internal/socket/msghdr_bsdvar.go | 16 + .../x/net/internal/socket/msghdr_linux.go | 36 + .../x/net/internal/socket/msghdr_linux_32bit.go | 24 + .../x/net/internal/socket/msghdr_linux_64bit.go | 24 + .../x/net/internal/socket/msghdr_openbsd.go | 14 + .../x/net/internal/socket/msghdr_solaris_64bit.go | 36 + .../x/net/internal/socket/msghdr_stub.go | 14 + .../golang.org/x/net/internal/socket/rawconn.go | 66 + .../x/net/internal/socket/rawconn_mmsg.go | 74 + .../x/net/internal/socket/rawconn_msg.go | 77 + .../x/net/internal/socket/rawconn_nommsg.go 
| 18 + .../x/net/internal/socket/rawconn_nomsg.go | 18 + .../x/net/internal/socket/rawconn_stub.go | 25 + .../golang.org/x/net/internal/socket/reflect.go | 62 + .../golang.org/x/net/internal/socket/socket.go | 285 + .../vendor/golang.org/x/net/internal/socket/sys.go | 33 + .../golang.org/x/net/internal/socket/sys_bsd.go | 17 + .../golang.org/x/net/internal/socket/sys_bsdvar.go | 14 + .../golang.org/x/net/internal/socket/sys_darwin.go | 7 + .../x/net/internal/socket/sys_dragonfly.go | 7 + .../golang.org/x/net/internal/socket/sys_linux.go | 27 + .../x/net/internal/socket/sys_linux_386.go | 55 + .../x/net/internal/socket/sys_linux_386.s | 11 + .../x/net/internal/socket/sys_linux_amd64.go | 10 + .../x/net/internal/socket/sys_linux_arm.go | 10 + .../x/net/internal/socket/sys_linux_arm64.go | 10 + .../x/net/internal/socket/sys_linux_mips.go | 10 + .../x/net/internal/socket/sys_linux_mips64.go | 10 + .../x/net/internal/socket/sys_linux_mips64le.go | 10 + .../x/net/internal/socket/sys_linux_mipsle.go | 10 + .../x/net/internal/socket/sys_linux_ppc64.go | 10 + .../x/net/internal/socket/sys_linux_ppc64le.go | 10 + .../x/net/internal/socket/sys_linux_s390x.go | 55 + .../x/net/internal/socket/sys_linux_s390x.s | 11 + .../golang.org/x/net/internal/socket/sys_netbsd.go | 25 + .../golang.org/x/net/internal/socket/sys_posix.go | 168 + .../x/net/internal/socket/sys_solaris.go | 71 + .../x/net/internal/socket/sys_solaris_amd64.s | 11 + .../golang.org/x/net/internal/socket/sys_stub.go | 64 + .../golang.org/x/net/internal/socket/sys_unix.go | 33 + .../x/net/internal/socket/sys_windows.go | 70 + .../x/net/internal/socket/zsys_darwin_386.go | 59 + .../x/net/internal/socket/zsys_darwin_amd64.go | 61 + .../x/net/internal/socket/zsys_darwin_arm.go | 59 + .../x/net/internal/socket/zsys_darwin_arm64.go | 61 + .../x/net/internal/socket/zsys_dragonfly_amd64.go | 61 + .../x/net/internal/socket/zsys_freebsd_386.go | 59 + .../x/net/internal/socket/zsys_freebsd_amd64.go | 61 + 
.../x/net/internal/socket/zsys_freebsd_arm.go | 59 + .../x/net/internal/socket/zsys_linux_386.go | 63 + .../x/net/internal/socket/zsys_linux_amd64.go | 66 + .../x/net/internal/socket/zsys_linux_arm.go | 63 + .../x/net/internal/socket/zsys_linux_arm64.go | 66 + .../x/net/internal/socket/zsys_linux_mips.go | 63 + .../x/net/internal/socket/zsys_linux_mips64.go | 66 + .../x/net/internal/socket/zsys_linux_mips64le.go | 66 + .../x/net/internal/socket/zsys_linux_mipsle.go | 63 + .../x/net/internal/socket/zsys_linux_ppc64.go | 66 + .../x/net/internal/socket/zsys_linux_ppc64le.go | 66 + .../x/net/internal/socket/zsys_linux_s390x.go | 66 + .../x/net/internal/socket/zsys_netbsd_386.go | 65 + .../x/net/internal/socket/zsys_netbsd_amd64.go | 68 + .../x/net/internal/socket/zsys_netbsd_arm.go | 65 + .../x/net/internal/socket/zsys_openbsd_386.go | 59 + .../x/net/internal/socket/zsys_openbsd_amd64.go | 61 + .../x/net/internal/socket/zsys_openbsd_arm.go | 59 + .../x/net/internal/socket/zsys_solaris_amd64.go | 60 + .../x/net/internal/timeseries/timeseries.go | 525 + .../vendor/golang.org/x/net/ipv4/batch.go | 191 + .../vendor/golang.org/x/net/ipv4/control.go | 144 + .../vendor/golang.org/x/net/ipv4/control_bsd.go | 40 + .../golang.org/x/net/ipv4/control_pktinfo.go | 39 + .../vendor/golang.org/x/net/ipv4/control_stub.go | 13 + .../vendor/golang.org/x/net/ipv4/control_unix.go | 73 + .../golang.org/x/net/ipv4/control_windows.go | 16 + .../vendor/golang.org/x/net/ipv4/dgramopt.go | 265 + src/prometheus/vendor/golang.org/x/net/ipv4/doc.go | 244 + .../vendor/golang.org/x/net/ipv4/endpoint.go | 187 + .../vendor/golang.org/x/net/ipv4/genericopt.go | 57 + .../vendor/golang.org/x/net/ipv4/header.go | 159 + .../vendor/golang.org/x/net/ipv4/helper.go | 63 + .../vendor/golang.org/x/net/ipv4/iana.go | 38 + .../vendor/golang.org/x/net/ipv4/icmp.go | 57 + .../vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 + .../vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 + 
.../vendor/golang.org/x/net/ipv4/packet.go | 69 + .../vendor/golang.org/x/net/ipv4/packet_go1_8.go | 56 + .../vendor/golang.org/x/net/ipv4/packet_go1_9.go | 67 + .../vendor/golang.org/x/net/ipv4/payload.go | 23 + .../vendor/golang.org/x/net/ipv4/payload_cmsg.go | 36 + .../golang.org/x/net/ipv4/payload_cmsg_go1_8.go | 59 + .../golang.org/x/net/ipv4/payload_cmsg_go1_9.go | 67 + .../vendor/golang.org/x/net/ipv4/payload_nocmsg.go | 42 + .../vendor/golang.org/x/net/ipv4/sockopt.go | 44 + .../vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 + .../vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 + .../vendor/golang.org/x/net/ipv4/sys_asmreq.go | 119 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 + .../vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 + .../vendor/golang.org/x/net/ipv4/sys_bpf.go | 23 + .../vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 + .../vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 + .../vendor/golang.org/x/net/ipv4/sys_darwin.go | 93 + .../vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 + .../vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 + .../vendor/golang.org/x/net/ipv4/sys_linux.go | 59 + .../vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 + .../vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 + .../vendor/golang.org/x/net/ipv4/sys_stub.go | 13 + .../vendor/golang.org/x/net/ipv4/sys_windows.go | 67 + .../vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 + .../vendor/golang.org/x/net/ipv4/zsys_dragonfly.go | 31 + .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 + .../golang.org/x/net/ipv4/zsys_freebsd_amd64.go | 95 + .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 + .../vendor/golang.org/x/net/ipv4/zsys_linux_386.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 150 + .../vendor/golang.org/x/net/ipv4/zsys_linux_arm.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 150 + 
.../golang.org/x/net/ipv4/zsys_linux_mips.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_mips64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_mips64le.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_mipsle.go | 148 + .../vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go | 148 + .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_ppc64le.go | 150 + .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 150 + .../vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 + .../vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 + .../vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 + .../vendor/golang.org/x/net/ipv6/batch.go | 119 + .../vendor/golang.org/x/net/ipv6/control.go | 187 + .../golang.org/x/net/ipv6/control_rfc2292_unix.go | 48 + .../golang.org/x/net/ipv6/control_rfc3542_unix.go | 94 + .../vendor/golang.org/x/net/ipv6/control_stub.go | 13 + .../vendor/golang.org/x/net/ipv6/control_unix.go | 55 + .../golang.org/x/net/ipv6/control_windows.go | 16 + .../vendor/golang.org/x/net/ipv6/dgramopt.go | 302 + src/prometheus/vendor/golang.org/x/net/ipv6/doc.go | 243 + .../vendor/golang.org/x/net/ipv6/endpoint.go | 128 + .../vendor/golang.org/x/net/ipv6/genericopt.go | 58 + .../vendor/golang.org/x/net/ipv6/header.go | 55 + .../vendor/golang.org/x/net/ipv6/helper.go | 57 + .../vendor/golang.org/x/net/ipv6/iana.go | 86 + .../vendor/golang.org/x/net/ipv6/icmp.go | 60 + .../vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 + .../vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 + .../vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 + .../vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 + .../vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 + .../vendor/golang.org/x/net/ipv6/payload.go | 23 + .../vendor/golang.org/x/net/ipv6/payload_cmsg.go | 35 + .../golang.org/x/net/ipv6/payload_cmsg_go1_8.go | 55 + .../golang.org/x/net/ipv6/payload_cmsg_go1_9.go | 57 + .../vendor/golang.org/x/net/ipv6/payload_nocmsg.go | 41 + .../vendor/golang.org/x/net/ipv6/sockopt.go | 
43 + .../vendor/golang.org/x/net/ipv6/sockopt_posix.go | 87 + .../vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 + .../vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 + .../vendor/golang.org/x/net/ipv6/sys_bpf.go | 23 + .../vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 + .../vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 + .../vendor/golang.org/x/net/ipv6/sys_darwin.go | 106 + .../vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 + .../vendor/golang.org/x/net/ipv6/sys_linux.go | 74 + .../vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 + .../vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 + .../vendor/golang.org/x/net/ipv6/sys_stub.go | 13 + .../vendor/golang.org/x/net/ipv6/sys_windows.go | 75 + .../vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 + .../vendor/golang.org/x/net/ipv6/zsys_dragonfly.go | 88 + .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 + .../golang.org/x/net/ipv6/zsys_freebsd_amd64.go | 124 + .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 + .../vendor/golang.org/x/net/ipv6/zsys_linux_386.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 172 + .../vendor/golang.org/x/net/ipv6/zsys_linux_arm.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_mips.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_mips64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_mips64le.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_mipsle.go | 170 + .../vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go | 170 + .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_ppc64le.go | 172 + .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 172 + .../vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 + .../vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 + .../vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 + .../vendor/golang.org/x/net/lex/httplex/httplex.go | 351 + 
.../vendor/golang.org/x/net/netutil/listen.go | 48 + .../vendor/golang.org/x/net/trace/events.go | 532 + .../vendor/golang.org/x/net/trace/histogram.go | 365 + .../vendor/golang.org/x/net/trace/trace.go | 1103 + .../vendor/golang.org/x/net/trace/trace_go16.go | 21 + .../vendor/golang.org/x/net/trace/trace_go17.go | 21 + src/prometheus/vendor/golang.org/x/oauth2/AUTHORS | 3 + .../vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 + .../vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + src/prometheus/vendor/golang.org/x/oauth2/LICENSE | 27 + .../vendor/golang.org/x/oauth2/README.md | 64 + .../vendor/golang.org/x/oauth2/client_appengine.go | 25 + .../vendor/golang.org/x/oauth2/google/appengine.go | 86 + .../golang.org/x/oauth2/google/appengine_hook.go | 13 + .../golang.org/x/oauth2/google/appenginevm_hook.go | 14 + .../vendor/golang.org/x/oauth2/google/default.go | 155 + .../vendor/golang.org/x/oauth2/google/google.go | 146 + .../vendor/golang.org/x/oauth2/google/jwt.go | 71 + .../vendor/golang.org/x/oauth2/google/sdk.go | 168 + .../vendor/golang.org/x/oauth2/internal/oauth2.go | 76 + .../vendor/golang.org/x/oauth2/internal/token.go | 225 + .../golang.org/x/oauth2/internal/transport.go | 69 + .../vendor/golang.org/x/oauth2/jws/jws.go | 191 + .../vendor/golang.org/x/oauth2/jwt/jwt.go | 153 + .../vendor/golang.org/x/oauth2/oauth2.go | 337 + src/prometheus/vendor/golang.org/x/oauth2/token.go | 158 + .../vendor/golang.org/x/oauth2/transport.go | 132 + src/prometheus/vendor/golang.org/x/sync/LICENSE | 27 + src/prometheus/vendor/golang.org/x/sync/PATENTS | 22 + .../vendor/golang.org/x/sync/errgroup/errgroup.go | 67 + src/prometheus/vendor/golang.org/x/sys/LICENSE | 27 + src/prometheus/vendor/golang.org/x/sys/PATENTS | 22 + .../vendor/golang.org/x/sys/unix/README.md | 173 + .../vendor/golang.org/x/sys/unix/affinity_linux.go | 124 + .../vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 + .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 + 
.../vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 + .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 + .../golang.org/x/sys/unix/asm_dragonfly_amd64.s | 29 + .../vendor/golang.org/x/sys/unix/asm_freebsd_386.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 + .../vendor/golang.org/x/sys/unix/asm_freebsd_arm.s | 29 + .../vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 + .../vendor/golang.org/x/sys/unix/asm_linux_amd64.s | 57 + .../vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 + .../vendor/golang.org/x/sys/unix/asm_linux_arm64.s | 52 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 + .../vendor/golang.org/x/sys/unix/asm_linux_mipsx.s | 54 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 56 + .../vendor/golang.org/x/sys/unix/asm_linux_s390x.s | 56 + .../vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 + .../vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 + .../vendor/golang.org/x/sys/unix/asm_openbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 + .../vendor/golang.org/x/sys/unix/asm_openbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 + .../golang.org/x/sys/unix/bluetooth_linux.go | 35 + .../vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 + .../vendor/golang.org/x/sys/unix/constants.go | 13 + .../vendor/golang.org/x/sys/unix/dev_darwin.go | 24 + .../vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 + .../vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 + .../vendor/golang.org/x/sys/unix/dev_linux.go | 42 + .../vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 + .../vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 + .../vendor/golang.org/x/sys/unix/dirent.go | 17 + .../vendor/golang.org/x/sys/unix/endian_big.go | 9 + .../vendor/golang.org/x/sys/unix/endian_little.go | 9 + .../vendor/golang.org/x/sys/unix/env_unix.go | 31 + .../golang.org/x/sys/unix/errors_freebsd_386.go | 227 + .../golang.org/x/sys/unix/errors_freebsd_amd64.go | 227 + 
.../golang.org/x/sys/unix/errors_freebsd_arm.go | 226 + .../vendor/golang.org/x/sys/unix/flock.go | 22 + .../golang.org/x/sys/unix/flock_linux_32bit.go | 13 + .../vendor/golang.org/x/sys/unix/gccgo.go | 61 + .../vendor/golang.org/x/sys/unix/gccgo_c.c | 47 + .../golang.org/x/sys/unix/gccgo_linux_amd64.go | 20 + .../vendor/golang.org/x/sys/unix/mkall.sh | 188 + .../vendor/golang.org/x/sys/unix/mkerrors.sh | 581 + .../vendor/golang.org/x/sys/unix/mksyscall.pl | 341 + .../golang.org/x/sys/unix/mksyscall_solaris.pl | 289 + .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 264 + .../golang.org/x/sys/unix/mksysnum_darwin.pl | 39 + .../golang.org/x/sys/unix/mksysnum_dragonfly.pl | 50 + .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 50 + .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 58 + .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 50 + .../vendor/golang.org/x/sys/unix/openbsd_pledge.go | 38 + .../vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 + .../vendor/golang.org/x/sys/unix/race.go | 30 + .../vendor/golang.org/x/sys/unix/race0.go | 25 + .../vendor/golang.org/x/sys/unix/sockcmsg_linux.go | 36 + .../vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 104 + src/prometheus/vendor/golang.org/x/sys/unix/str.go | 26 + .../vendor/golang.org/x/sys/unix/syscall.go | 54 + .../vendor/golang.org/x/sys/unix/syscall_bsd.go | 665 + .../vendor/golang.org/x/sys/unix/syscall_darwin.go | 602 + .../golang.org/x/sys/unix/syscall_darwin_386.go | 68 + .../golang.org/x/sys/unix/syscall_darwin_amd64.go | 68 + .../golang.org/x/sys/unix/syscall_darwin_arm.go | 66 + .../golang.org/x/sys/unix/syscall_darwin_arm64.go | 68 + .../golang.org/x/sys/unix/syscall_dragonfly.go | 523 + .../x/sys/unix/syscall_dragonfly_amd64.go | 52 + .../golang.org/x/sys/unix/syscall_freebsd.go | 760 + .../golang.org/x/sys/unix/syscall_freebsd_386.go | 52 + .../golang.org/x/sys/unix/syscall_freebsd_amd64.go | 52 + .../golang.org/x/sys/unix/syscall_freebsd_arm.go | 52 + .../vendor/golang.org/x/sys/unix/syscall_linux.go | 
1503 + .../golang.org/x/sys/unix/syscall_linux_386.go | 391 + .../golang.org/x/sys/unix/syscall_linux_amd64.go | 144 + .../x/sys/unix/syscall_linux_amd64_gc.go | 13 + .../golang.org/x/sys/unix/syscall_linux_arm.go | 255 + .../golang.org/x/sys/unix/syscall_linux_arm64.go | 189 + .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 + .../golang.org/x/sys/unix/syscall_linux_gccgo.go | 21 + .../golang.org/x/sys/unix/syscall_linux_mips64x.go | 209 + .../golang.org/x/sys/unix/syscall_linux_mipsx.go | 231 + .../golang.org/x/sys/unix/syscall_linux_ppc64x.go | 127 + .../golang.org/x/sys/unix/syscall_linux_s390x.go | 320 + .../golang.org/x/sys/unix/syscall_linux_sparc64.go | 143 + .../vendor/golang.org/x/sys/unix/syscall_netbsd.go | 567 + .../golang.org/x/sys/unix/syscall_netbsd_386.go | 33 + .../golang.org/x/sys/unix/syscall_netbsd_amd64.go | 33 + .../golang.org/x/sys/unix/syscall_netbsd_arm.go | 33 + .../golang.org/x/sys/unix/syscall_openbsd.go | 367 + .../golang.org/x/sys/unix/syscall_openbsd_386.go | 33 + .../golang.org/x/sys/unix/syscall_openbsd_amd64.go | 33 + .../golang.org/x/sys/unix/syscall_openbsd_arm.go | 33 + .../golang.org/x/sys/unix/syscall_solaris.go | 718 + .../golang.org/x/sys/unix/syscall_solaris_amd64.go | 28 + .../vendor/golang.org/x/sys/unix/syscall_unix.go | 316 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 + .../vendor/golang.org/x/sys/unix/timestruct.go | 82 + .../golang.org/x/sys/unix/zerrors_darwin_386.go | 1769 + .../golang.org/x/sys/unix/zerrors_darwin_amd64.go | 1769 + .../golang.org/x/sys/unix/zerrors_darwin_arm.go | 1769 + .../golang.org/x/sys/unix/zerrors_darwin_arm64.go | 1769 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 1578 + .../golang.org/x/sys/unix/zerrors_freebsd_386.go | 1756 + .../golang.org/x/sys/unix/zerrors_freebsd_amd64.go | 1757 + .../golang.org/x/sys/unix/zerrors_freebsd_arm.go | 1765 + .../golang.org/x/sys/unix/zerrors_linux_386.go | 2298 + .../golang.org/x/sys/unix/zerrors_linux_amd64.go | 2299 + 
.../golang.org/x/sys/unix/zerrors_linux_arm.go | 2306 + .../golang.org/x/sys/unix/zerrors_linux_arm64.go | 2289 + .../golang.org/x/sys/unix/zerrors_linux_mips.go | 2308 + .../golang.org/x/sys/unix/zerrors_linux_mips64.go | 2308 + .../x/sys/unix/zerrors_linux_mips64le.go | 2308 + .../golang.org/x/sys/unix/zerrors_linux_mipsle.go | 2308 + .../golang.org/x/sys/unix/zerrors_linux_ppc64.go | 2361 + .../golang.org/x/sys/unix/zerrors_linux_ppc64le.go | 2361 + .../golang.org/x/sys/unix/zerrors_linux_s390x.go | 2360 + .../golang.org/x/sys/unix/zerrors_linux_sparc64.go | 2142 + .../golang.org/x/sys/unix/zerrors_netbsd_386.go | 1719 + .../golang.org/x/sys/unix/zerrors_netbsd_amd64.go | 1709 + .../golang.org/x/sys/unix/zerrors_netbsd_arm.go | 1698 + .../golang.org/x/sys/unix/zerrors_openbsd_386.go | 1591 + .../golang.org/x/sys/unix/zerrors_openbsd_amd64.go | 1590 + .../golang.org/x/sys/unix/zerrors_openbsd_arm.go | 1593 + .../golang.org/x/sys/unix/zerrors_solaris_amd64.go | 1489 + .../golang.org/x/sys/unix/zptrace386_linux.go | 80 + .../golang.org/x/sys/unix/zptracearm_linux.go | 41 + .../golang.org/x/sys/unix/zptracemips_linux.go | 50 + .../golang.org/x/sys/unix/zptracemipsle_linux.go | 50 + .../golang.org/x/sys/unix/zsyscall_darwin_386.go | 1635 + .../golang.org/x/sys/unix/zsyscall_darwin_amd64.go | 1635 + .../golang.org/x/sys/unix/zsyscall_darwin_arm.go | 1635 + .../golang.org/x/sys/unix/zsyscall_darwin_arm64.go | 1635 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1508 + .../golang.org/x/sys/unix/zsyscall_freebsd_386.go | 1937 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 1937 + .../golang.org/x/sys/unix/zsyscall_freebsd_arm.go | 1937 + .../golang.org/x/sys/unix/zsyscall_linux_386.go | 1994 + .../golang.org/x/sys/unix/zsyscall_linux_amd64.go | 2187 + .../golang.org/x/sys/unix/zsyscall_linux_arm.go | 2096 + .../golang.org/x/sys/unix/zsyscall_linux_arm64.go | 2044 + .../golang.org/x/sys/unix/zsyscall_linux_mips.go | 2152 + .../golang.org/x/sys/unix/zsyscall_linux_mips64.go | 
2135 + .../x/sys/unix/zsyscall_linux_mips64le.go | 2135 + .../golang.org/x/sys/unix/zsyscall_linux_mipsle.go | 2152 + .../golang.org/x/sys/unix/zsyscall_linux_ppc64.go | 2198 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 2198 + .../golang.org/x/sys/unix/zsyscall_linux_s390x.go | 1978 + .../x/sys/unix/zsyscall_linux_sparc64.go | 1833 + .../golang.org/x/sys/unix/zsyscall_netbsd_386.go | 1414 + .../golang.org/x/sys/unix/zsyscall_netbsd_amd64.go | 1414 + .../golang.org/x/sys/unix/zsyscall_netbsd_arm.go | 1414 + .../golang.org/x/sys/unix/zsyscall_openbsd_386.go | 1472 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 1472 + .../golang.org/x/sys/unix/zsyscall_openbsd_arm.go | 1472 + .../x/sys/unix/zsyscall_solaris_amd64.go | 1669 + .../golang.org/x/sys/unix/zsysctl_openbsd_386.go | 270 + .../golang.org/x/sys/unix/zsysctl_openbsd_amd64.go | 270 + .../golang.org/x/sys/unix/zsysctl_openbsd_arm.go | 270 + .../golang.org/x/sys/unix/zsysnum_darwin_386.go | 436 + .../golang.org/x/sys/unix/zsysnum_darwin_amd64.go | 436 + .../golang.org/x/sys/unix/zsysnum_darwin_arm.go | 436 + .../golang.org/x/sys/unix/zsysnum_darwin_arm64.go | 436 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 + .../golang.org/x/sys/unix/zsysnum_freebsd_386.go | 353 + .../golang.org/x/sys/unix/zsysnum_freebsd_amd64.go | 353 + .../golang.org/x/sys/unix/zsysnum_freebsd_arm.go | 353 + .../golang.org/x/sys/unix/zsysnum_linux_386.go | 390 + .../golang.org/x/sys/unix/zsysnum_linux_amd64.go | 342 + .../golang.org/x/sys/unix/zsysnum_linux_arm.go | 362 + .../golang.org/x/sys/unix/zsysnum_linux_arm64.go | 286 + .../golang.org/x/sys/unix/zsysnum_linux_mips.go | 375 + .../golang.org/x/sys/unix/zsysnum_linux_mips64.go | 335 + .../x/sys/unix/zsysnum_linux_mips64le.go | 335 + .../golang.org/x/sys/unix/zsysnum_linux_mipsle.go | 375 + .../golang.org/x/sys/unix/zsysnum_linux_ppc64.go | 370 + .../golang.org/x/sys/unix/zsysnum_linux_ppc64le.go | 370 + .../golang.org/x/sys/unix/zsysnum_linux_s390x.go | 334 + 
.../golang.org/x/sys/unix/zsysnum_linux_sparc64.go | 348 + .../golang.org/x/sys/unix/zsysnum_netbsd_386.go | 274 + .../golang.org/x/sys/unix/zsysnum_netbsd_amd64.go | 274 + .../golang.org/x/sys/unix/zsysnum_netbsd_arm.go | 274 + .../golang.org/x/sys/unix/zsysnum_openbsd_386.go | 207 + .../golang.org/x/sys/unix/zsysnum_openbsd_amd64.go | 207 + .../golang.org/x/sys/unix/zsysnum_openbsd_arm.go | 213 + .../golang.org/x/sys/unix/ztypes_darwin_386.go | 489 + .../golang.org/x/sys/unix/ztypes_darwin_amd64.go | 499 + .../golang.org/x/sys/unix/ztypes_darwin_arm.go | 490 + .../golang.org/x/sys/unix/ztypes_darwin_arm64.go | 499 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 486 + .../golang.org/x/sys/unix/ztypes_freebsd_386.go | 553 + .../golang.org/x/sys/unix/ztypes_freebsd_amd64.go | 556 + .../golang.org/x/sys/unix/ztypes_freebsd_arm.go | 556 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 897 + .../golang.org/x/sys/unix/ztypes_linux_amd64.go | 915 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 886 + .../golang.org/x/sys/unix/ztypes_linux_arm64.go | 894 + .../golang.org/x/sys/unix/ztypes_linux_mips.go | 891 + .../golang.org/x/sys/unix/ztypes_linux_mips64.go | 896 + .../golang.org/x/sys/unix/ztypes_linux_mips64le.go | 896 + .../golang.org/x/sys/unix/ztypes_linux_mipsle.go | 891 + .../golang.org/x/sys/unix/ztypes_linux_ppc64.go | 904 + .../golang.org/x/sys/unix/ztypes_linux_ppc64le.go | 904 + .../golang.org/x/sys/unix/ztypes_linux_s390x.go | 921 + .../golang.org/x/sys/unix/ztypes_linux_sparc64.go | 690 + .../golang.org/x/sys/unix/ztypes_netbsd_386.go | 439 + .../golang.org/x/sys/unix/ztypes_netbsd_amd64.go | 446 + .../golang.org/x/sys/unix/ztypes_netbsd_arm.go | 444 + .../golang.org/x/sys/unix/ztypes_openbsd_386.go | 484 + .../golang.org/x/sys/unix/ztypes_openbsd_amd64.go | 491 + .../golang.org/x/sys/unix/ztypes_openbsd_arm.go | 477 + .../golang.org/x/sys/unix/ztypes_solaris_amd64.go | 459 + src/prometheus/vendor/golang.org/x/text/LICENSE | 27 + 
src/prometheus/vendor/golang.org/x/text/PATENTS | 22 + .../vendor/golang.org/x/text/cases/cases.go | 129 + .../vendor/golang.org/x/text/cases/context.go | 281 + .../vendor/golang.org/x/text/cases/fold.go | 26 + .../vendor/golang.org/x/text/cases/gen.go | 831 + .../vendor/golang.org/x/text/cases/gen_trieval.go | 217 + .../vendor/golang.org/x/text/cases/info.go | 83 + .../vendor/golang.org/x/text/cases/map.go | 599 + .../vendor/golang.org/x/text/cases/tables.go | 2213 + .../vendor/golang.org/x/text/cases/trieval.go | 213 + .../vendor/golang.org/x/text/internal/tag/tag.go | 100 + .../vendor/golang.org/x/text/language/Makefile | 16 + .../vendor/golang.org/x/text/language/common.go | 16 + .../vendor/golang.org/x/text/language/coverage.go | 197 + .../golang.org/x/text/language/gen_common.go | 20 + .../vendor/golang.org/x/text/language/gen_index.go | 162 + .../vendor/golang.org/x/text/language/go1_1.go | 38 + .../vendor/golang.org/x/text/language/go1_2.go | 11 + .../vendor/golang.org/x/text/language/index.go | 762 + .../vendor/golang.org/x/text/language/language.go | 975 + .../vendor/golang.org/x/text/language/lookup.go | 396 + .../golang.org/x/text/language/maketables.go | 1635 + .../vendor/golang.org/x/text/language/match.go | 840 + .../vendor/golang.org/x/text/language/parse.go | 859 + .../vendor/golang.org/x/text/language/tables.go | 2791 + .../vendor/golang.org/x/text/language/tags.go | 143 + .../vendor/golang.org/x/text/runes/cond.go | 126 + .../vendor/golang.org/x/text/runes/runes.go | 278 + .../golang.org/x/text/secure/bidirule/bidirule.go | 342 + .../golang.org/x/text/secure/precis/class.go | 36 + .../golang.org/x/text/secure/precis/context.go | 139 + .../vendor/golang.org/x/text/secure/precis/doc.go | 14 + .../vendor/golang.org/x/text/secure/precis/gen.go | 310 + .../golang.org/x/text/secure/precis/gen_trieval.go | 68 + .../golang.org/x/text/secure/precis/nickname.go | 70 + .../golang.org/x/text/secure/precis/options.go | 106 + 
.../golang.org/x/text/secure/precis/profile.go | 330 + .../golang.org/x/text/secure/precis/profiles.go | 56 + .../golang.org/x/text/secure/precis/tables.go | 3788 + .../golang.org/x/text/secure/precis/transformer.go | 32 + .../golang.org/x/text/secure/precis/trieval.go | 64 + .../golang.org/x/text/transform/transform.go | 661 + .../vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 + .../golang.org/x/text/unicode/bidi/bracket.go | 307 + .../vendor/golang.org/x/text/unicode/bidi/core.go | 1055 + .../vendor/golang.org/x/text/unicode/bidi/gen.go | 133 + .../golang.org/x/text/unicode/bidi/gen_ranges.go | 57 + .../golang.org/x/text/unicode/bidi/gen_trieval.go | 64 + .../vendor/golang.org/x/text/unicode/bidi/prop.go | 206 + .../golang.org/x/text/unicode/bidi/tables.go | 1779 + .../golang.org/x/text/unicode/bidi/trieval.go | 60 + .../golang.org/x/text/unicode/norm/composition.go | 514 + .../golang.org/x/text/unicode/norm/forminfo.go | 256 + .../vendor/golang.org/x/text/unicode/norm/input.go | 105 + .../vendor/golang.org/x/text/unicode/norm/iter.go | 450 + .../golang.org/x/text/unicode/norm/maketables.go | 978 + .../golang.org/x/text/unicode/norm/normalize.go | 576 + .../golang.org/x/text/unicode/norm/readwriter.go | 126 + .../golang.org/x/text/unicode/norm/tables.go | 7627 ++ .../golang.org/x/text/unicode/norm/transform.go | 88 + .../vendor/golang.org/x/text/unicode/norm/trie.go | 54 + .../golang.org/x/text/unicode/norm/triegen.go | 117 + .../vendor/golang.org/x/text/width/gen.go | 115 + .../vendor/golang.org/x/text/width/gen_common.go | 96 + .../vendor/golang.org/x/text/width/gen_trieval.go | 34 + .../vendor/golang.org/x/text/width/kind_string.go | 16 + .../vendor/golang.org/x/text/width/tables.go | 1284 + .../vendor/golang.org/x/text/width/transform.go | 162 + .../vendor/golang.org/x/text/width/trieval.go | 30 + .../vendor/golang.org/x/text/width/width.go | 201 + src/prometheus/vendor/golang.org/x/time/LICENSE | 27 + src/prometheus/vendor/golang.org/x/time/PATENTS | 22 
+ .../vendor/golang.org/x/time/rate/rate.go | 380 + .../vendor/golang.org/x/time/rate/rate_go16.go | 21 + .../vendor/golang.org/x/time/rate/rate_go17.go | 21 + .../vendor/google.golang.org/api/LICENSE | 27 + .../api/compute/v1/compute-api.json | 30781 +++++++ .../api/compute/v1/compute-gen.go | 84773 +++++++++++++++++++ .../google.golang.org/api/gensupport/backoff.go | 46 + .../google.golang.org/api/gensupport/buffer.go | 77 + .../vendor/google.golang.org/api/gensupport/doc.go | 10 + .../google.golang.org/api/gensupport/go18.go | 17 + .../google.golang.org/api/gensupport/header.go | 22 + .../google.golang.org/api/gensupport/json.go | 211 + .../google.golang.org/api/gensupport/jsonfloat.go | 57 + .../google.golang.org/api/gensupport/media.go | 331 + .../google.golang.org/api/gensupport/not_go18.go | 14 + .../google.golang.org/api/gensupport/params.go | 50 + .../google.golang.org/api/gensupport/resumable.go | 217 + .../google.golang.org/api/gensupport/retry.go | 85 + .../google.golang.org/api/gensupport/send.go | 71 + .../google.golang.org/api/googleapi/googleapi.go | 406 + .../api/googleapi/internal/uritemplates/LICENSE | 18 + .../internal/uritemplates/uritemplates.go | 248 + .../api/googleapi/internal/uritemplates/utils.go | 17 + .../google.golang.org/api/googleapi/types.go | 202 + .../vendor/google.golang.org/appengine/LICENSE | 202 + .../vendor/google.golang.org/appengine/README.md | 71 + .../google.golang.org/appengine/appengine.go | 76 + .../google.golang.org/appengine/appengine_vm.go | 56 + .../vendor/google.golang.org/appengine/errors.go | 46 + .../vendor/google.golang.org/appengine/identity.go | 142 + .../google.golang.org/appengine/internal/api.go | 646 + .../appengine/internal/api_classic.go | 159 + .../appengine/internal/api_common.go | 86 + .../google.golang.org/appengine/internal/app_id.go | 28 + .../app_identity/app_identity_service.pb.go | 296 + .../app_identity/app_identity_service.proto | 64 + .../appengine/internal/base/api_base.pb.go | 133 + 
.../appengine/internal/base/api_base.proto | 33 + .../internal/datastore/datastore_v3.pb.go | 2778 + .../internal/datastore/datastore_v3.proto | 541 + .../appengine/internal/identity.go | 14 + .../appengine/internal/identity_classic.go | 27 + .../appengine/internal/identity_vm.go | 97 + .../appengine/internal/internal.go | 144 + .../appengine/internal/log/log_service.pb.go | 899 + .../appengine/internal/log/log_service.proto | 150 + .../appengine/internal/metadata.go | 61 + .../internal/modules/modules_service.pb.go | 375 + .../internal/modules/modules_service.proto | 80 + .../google.golang.org/appengine/internal/net.go | 56 + .../google.golang.org/appengine/internal/regen.sh | 40 + .../appengine/internal/remote_api/remote_api.pb.go | 231 + .../appengine/internal/remote_api/remote_api.proto | 44 + .../appengine/internal/transaction.go | 107 + .../internal/urlfetch/urlfetch_service.pb.go | 355 + .../internal/urlfetch/urlfetch_service.proto | 64 + .../google.golang.org/appengine/namespace.go | 25 + .../vendor/google.golang.org/appengine/timeout.go | 20 + .../appengine/urlfetch/urlfetch.go | 210 + .../vendor/google.golang.org/cloud/LICENSE | 202 + .../cloud/compute/metadata/metadata.go | 382 + .../google.golang.org/cloud/internal/cloud.go | 128 + .../vendor/google.golang.org/genproto/LICENSE | 202 + .../googleapis/api/annotations/annotations.pb.go | 64 + .../genproto/googleapis/api/annotations/http.pb.go | 566 + .../genproto/googleapis/rpc/status/status.pb.go | 144 + .../vendor/google.golang.org/grpc/CONTRIBUTING.md | 46 + .../vendor/google.golang.org/grpc/LICENSE | 28 + .../vendor/google.golang.org/grpc/Makefile | 52 + .../vendor/google.golang.org/grpc/PATENTS | 22 + .../vendor/google.golang.org/grpc/README.md | 45 + .../vendor/google.golang.org/grpc/backoff.go | 80 + .../vendor/google.golang.org/grpc/balancer.go | 408 + .../vendor/google.golang.org/grpc/call.go | 307 + .../vendor/google.golang.org/grpc/clientconn.go | 1047 + 
.../vendor/google.golang.org/grpc/codec.go | 119 + .../vendor/google.golang.org/grpc/codegen.sh | 17 + .../google.golang.org/grpc/codes/code_string.go | 16 + .../vendor/google.golang.org/grpc/codes/codes.go | 159 + .../vendor/google.golang.org/grpc/coverage.sh | 48 + .../grpc/credentials/credentials.go | 234 + .../grpc/credentials/credentials_util_go17.go | 75 + .../grpc/credentials/credentials_util_go18.go | 53 + .../grpc/credentials/credentials_util_pre_go17.go | 72 + .../vendor/google.golang.org/grpc/doc.go | 6 + .../vendor/google.golang.org/grpc/go16.go | 56 + .../vendor/google.golang.org/grpc/go17.go | 55 + .../vendor/google.golang.org/grpc/grpclb.go | 762 + .../grpc/grpclb/grpc_lb_v1/grpclb.pb.go | 629 + .../grpc/grpclb/grpc_lb_v1/grpclb.proto | 179 + .../google.golang.org/grpc/grpclog/logger.go | 93 + .../vendor/google.golang.org/grpc/interceptor.go | 90 + .../google.golang.org/grpc/internal/internal.go | 49 + .../google.golang.org/grpc/keepalive/keepalive.go | 80 + .../google.golang.org/grpc/metadata/metadata.go | 144 + .../vendor/google.golang.org/grpc/naming/naming.go | 74 + .../vendor/google.golang.org/grpc/peer/peer.go | 66 + .../vendor/google.golang.org/grpc/proxy.go | 145 + .../vendor/google.golang.org/grpc/rpc_util.go | 538 + .../vendor/google.golang.org/grpc/server.go | 1123 + .../google.golang.org/grpc/stats/handlers.go | 79 + .../vendor/google.golang.org/grpc/stats/stats.go | 223 + .../vendor/google.golang.org/grpc/status/status.go | 145 + .../vendor/google.golang.org/grpc/stream.go | 636 + .../vendor/google.golang.org/grpc/tap/tap.go | 54 + .../vendor/google.golang.org/grpc/trace.go | 119 + .../google.golang.org/grpc/transport/control.go | 207 + .../google.golang.org/grpc/transport/go16.go | 46 + .../google.golang.org/grpc/transport/go17.go | 46 + .../grpc/transport/handler_server.go | 413 + .../grpc/transport/http2_client.go | 1286 + .../grpc/transport/http2_server.go | 1079 + .../google.golang.org/grpc/transport/http_util.go | 613 + 
.../google.golang.org/grpc/transport/transport.go | 671 + .../vendor/gopkg.in/alecthomas/kingpin.v2/COPYING | 19 + .../gopkg.in/alecthomas/kingpin.v2/README.md | 674 + .../gopkg.in/alecthomas/kingpin.v2/actions.go | 42 + .../vendor/gopkg.in/alecthomas/kingpin.v2/app.go | 685 + .../vendor/gopkg.in/alecthomas/kingpin.v2/args.go | 184 + .../vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go | 274 + .../gopkg.in/alecthomas/kingpin.v2/completions.go | 33 + .../vendor/gopkg.in/alecthomas/kingpin.v2/doc.go | 68 + .../vendor/gopkg.in/alecthomas/kingpin.v2/envar.go | 45 + .../vendor/gopkg.in/alecthomas/kingpin.v2/flags.go | 308 + .../gopkg.in/alecthomas/kingpin.v2/global.go | 94 + .../gopkg.in/alecthomas/kingpin.v2/guesswidth.go | 9 + .../alecthomas/kingpin.v2/guesswidth_unix.go | 38 + .../vendor/gopkg.in/alecthomas/kingpin.v2/model.go | 227 + .../gopkg.in/alecthomas/kingpin.v2/parser.go | 382 + .../gopkg.in/alecthomas/kingpin.v2/parsers.go | 212 + .../gopkg.in/alecthomas/kingpin.v2/templates.go | 262 + .../vendor/gopkg.in/alecthomas/kingpin.v2/usage.go | 211 + .../gopkg.in/alecthomas/kingpin.v2/values.go | 470 + .../gopkg.in/alecthomas/kingpin.v2/values.json | 25 + .../alecthomas/kingpin.v2/values_generated.go | 821 + .../vendor/gopkg.in/fsnotify/fsnotify.v1/AUTHORS | 43 + .../gopkg.in/fsnotify/fsnotify.v1/CHANGELOG.md | 291 + .../gopkg.in/fsnotify/fsnotify.v1/CONTRIBUTING.md | 77 + .../vendor/gopkg.in/fsnotify/fsnotify.v1/LICENSE | 28 + .../vendor/gopkg.in/fsnotify/fsnotify.v1/README.md | 50 + .../vendor/gopkg.in/fsnotify/fsnotify.v1/fen.go | 37 + .../gopkg.in/fsnotify/fsnotify.v1/fsnotify.go | 62 + .../gopkg.in/fsnotify/fsnotify.v1/inotify.go | 325 + .../fsnotify/fsnotify.v1/inotify_poller.go | 187 + .../vendor/gopkg.in/fsnotify/fsnotify.v1/kqueue.go | 503 + .../gopkg.in/fsnotify/fsnotify.v1/open_mode_bsd.go | 11 + .../fsnotify/fsnotify.v1/open_mode_darwin.go | 12 + .../gopkg.in/fsnotify/fsnotify.v1/windows.go | 561 + src/prometheus/vendor/gopkg.in/inf.v0/LICENSE | 28 + 
src/prometheus/vendor/gopkg.in/inf.v0/dec.go | 615 + src/prometheus/vendor/gopkg.in/inf.v0/rounder.go | 145 + src/prometheus/vendor/gopkg.in/yaml.v2/LICENSE | 201 + .../vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + src/prometheus/vendor/gopkg.in/yaml.v2/README.md | 133 + src/prometheus/vendor/gopkg.in/yaml.v2/apic.go | 739 + src/prometheus/vendor/gopkg.in/yaml.v2/decode.go | 775 + src/prometheus/vendor/gopkg.in/yaml.v2/emitterc.go | 1685 + src/prometheus/vendor/gopkg.in/yaml.v2/encode.go | 356 + src/prometheus/vendor/gopkg.in/yaml.v2/parserc.go | 1095 + src/prometheus/vendor/gopkg.in/yaml.v2/readerc.go | 412 + src/prometheus/vendor/gopkg.in/yaml.v2/resolve.go | 258 + src/prometheus/vendor/gopkg.in/yaml.v2/scannerc.go | 2696 + src/prometheus/vendor/gopkg.in/yaml.v2/sorter.go | 113 + src/prometheus/vendor/gopkg.in/yaml.v2/writerc.go | 26 + src/prometheus/vendor/gopkg.in/yaml.v2/yaml.go | 466 + src/prometheus/vendor/gopkg.in/yaml.v2/yamlh.go | 738 + .../vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 + src/prometheus/vendor/k8s.io/apimachinery/LICENSE | 202 + .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 27 + .../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 + .../k8s.io/apimachinery/pkg/api/errors/errors.go | 478 + .../vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS | 26 + .../k8s.io/apimachinery/pkg/api/meta/default.go | 51 + .../vendor/k8s.io/apimachinery/pkg/api/meta/doc.go | 19 + .../k8s.io/apimachinery/pkg/api/meta/errors.go | 105 + .../pkg/api/meta/firsthit_restmapper.go | 97 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 199 + .../k8s.io/apimachinery/pkg/api/meta/interfaces.go | 147 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 581 + .../apimachinery/pkg/api/meta/multirestmapper.go | 231 + .../k8s.io/apimachinery/pkg/api/meta/priority.go | 226 + .../k8s.io/apimachinery/pkg/api/meta/restmapper.go | 566 + .../apimachinery/pkg/api/meta/unstructured.go | 31 + .../k8s.io/apimachinery/pkg/api/resource/OWNERS | 17 + 
.../k8s.io/apimachinery/pkg/api/resource/amount.go | 299 + .../apimachinery/pkg/api/resource/generated.pb.go | 71 + .../apimachinery/pkg/api/resource/generated.proto | 94 + .../k8s.io/apimachinery/pkg/api/resource/math.go | 327 + .../apimachinery/pkg/api/resource/quantity.go | 792 + .../pkg/api/resource/quantity_proto.go | 284 + .../apimachinery/pkg/api/resource/scale_int.go | 95 + .../k8s.io/apimachinery/pkg/api/resource/suffix.go | 198 + .../pkg/apimachinery/announced/announced.go | 99 + .../pkg/apimachinery/announced/group_factory.go | 252 + .../k8s.io/apimachinery/pkg/apimachinery/doc.go | 20 + .../pkg/apimachinery/registered/registered.go | 376 + .../k8s.io/apimachinery/pkg/apimachinery/types.go | 87 + .../k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS | 33 + .../apimachinery/pkg/apis/meta/v1/conversion.go | 264 + .../k8s.io/apimachinery/pkg/apis/meta/v1/doc.go | 22 + .../apimachinery/pkg/apis/meta/v1/duration.go | 47 + .../apimachinery/pkg/apis/meta/v1/generated.pb.go | 6915 ++ .../apimachinery/pkg/apis/meta/v1/generated.proto | 689 + .../apimachinery/pkg/apis/meta/v1/group_version.go | 148 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 234 + .../k8s.io/apimachinery/pkg/apis/meta/v1/labels.go | 75 + .../k8s.io/apimachinery/pkg/apis/meta/v1/meta.go | 209 + .../apimachinery/pkg/apis/meta/v1/register.go | 82 + .../k8s.io/apimachinery/pkg/apis/meta/v1/time.go | 180 + .../apimachinery/pkg/apis/meta/v1/time_proto.go | 85 + .../k8s.io/apimachinery/pkg/apis/meta/v1/types.go | 792 + .../apis/meta/v1/types_swagger_doc_generated.go | 290 + .../pkg/apis/meta/v1/unstructured/unstructured.go | 689 + .../k8s.io/apimachinery/pkg/apis/meta/v1/watch.go | 80 + .../pkg/apis/meta/v1/well_known_labels.go | 84 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 554 + .../pkg/apis/meta/v1/zz_generated.defaults.go | 32 + .../k8s.io/apimachinery/pkg/conversion/OWNERS | 10 + .../k8s.io/apimachinery/pkg/conversion/cloner.go | 249 + .../apimachinery/pkg/conversion/converter.go | 953 + 
.../apimachinery/pkg/conversion/deep_equal.go | 36 + .../k8s.io/apimachinery/pkg/conversion/doc.go | 24 + .../k8s.io/apimachinery/pkg/conversion/helper.go | 39 + .../pkg/conversion/queryparams/convert.go | 188 + .../apimachinery/pkg/conversion/queryparams/doc.go | 19 + .../vendor/k8s.io/apimachinery/pkg/fields/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/fields.go | 62 + .../k8s.io/apimachinery/pkg/fields/requirements.go | 30 + .../k8s.io/apimachinery/pkg/fields/selector.go | 413 + .../vendor/k8s.io/apimachinery/pkg/labels/doc.go | 19 + .../k8s.io/apimachinery/pkg/labels/labels.go | 181 + .../k8s.io/apimachinery/pkg/labels/selector.go | 836 + .../k8s.io/apimachinery/pkg/openapi/common.go | 160 + .../vendor/k8s.io/apimachinery/pkg/openapi/doc.go | 18 + .../vendor/k8s.io/apimachinery/pkg/runtime/OWNERS | 19 + .../k8s.io/apimachinery/pkg/runtime/codec.go | 316 + .../k8s.io/apimachinery/pkg/runtime/codec_check.go | 50 + .../k8s.io/apimachinery/pkg/runtime/conversion.go | 98 + .../vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 45 + .../k8s.io/apimachinery/pkg/runtime/embedded.go | 136 + .../k8s.io/apimachinery/pkg/runtime/error.go | 102 + .../k8s.io/apimachinery/pkg/runtime/extension.go | 48 + .../apimachinery/pkg/runtime/generated.pb.go | 767 + .../apimachinery/pkg/runtime/generated.proto | 128 + .../k8s.io/apimachinery/pkg/runtime/helper.go | 212 + .../k8s.io/apimachinery/pkg/runtime/interfaces.go | 251 + .../k8s.io/apimachinery/pkg/runtime/register.go | 61 + .../pkg/runtime/schema/generated.pb.go | 59 + .../pkg/runtime/schema/generated.proto | 28 + .../pkg/runtime/schema/group_version.go | 277 + .../apimachinery/pkg/runtime/schema/interfaces.go | 40 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 601 + .../apimachinery/pkg/runtime/scheme_builder.go | 48 + .../pkg/runtime/serializer/codec_factory.go | 237 + .../pkg/runtime/serializer/json/json.go | 245 + .../pkg/runtime/serializer/json/meta.go | 63 + .../pkg/runtime/serializer/negotiated_codec.go | 43 + 
.../pkg/runtime/serializer/protobuf/doc.go | 18 + .../pkg/runtime/serializer/protobuf/protobuf.go | 448 + .../pkg/runtime/serializer/protobuf_extension.go | 48 + .../runtime/serializer/recognizer/recognizer.go | 127 + .../pkg/runtime/serializer/streaming/streaming.go | 137 + .../runtime/serializer/versioning/versioning.go | 273 + .../pkg/runtime/swagger_doc_generator.go | 262 + .../k8s.io/apimachinery/pkg/runtime/types.go | 133 + .../k8s.io/apimachinery/pkg/runtime/types_proto.go | 69 + .../pkg/runtime/zz_generated.deepcopy.go | 80 + .../k8s.io/apimachinery/pkg/selection/operator.go | 33 + .../vendor/k8s.io/apimachinery/pkg/types/doc.go | 18 + .../apimachinery/pkg/types/namespacedname.go | 60 + .../k8s.io/apimachinery/pkg/types/nodename.go | 43 + .../vendor/k8s.io/apimachinery/pkg/types/patch.go | 28 + .../vendor/k8s.io/apimachinery/pkg/types/uid.go | 22 + .../k8s.io/apimachinery/pkg/types/unix_user_id.go | 23 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 280 + .../k8s.io/apimachinery/pkg/util/errors/doc.go | 18 + .../k8s.io/apimachinery/pkg/util/errors/errors.go | 182 + .../k8s.io/apimachinery/pkg/util/framer/framer.go | 167 + .../apimachinery/pkg/util/intstr/generated.pb.go | 374 + .../apimachinery/pkg/util/intstr/generated.proto | 43 + .../k8s.io/apimachinery/pkg/util/intstr/intstr.go | 177 + .../k8s.io/apimachinery/pkg/util/json/json.go | 107 + .../k8s.io/apimachinery/pkg/util/net/http.go | 269 + .../k8s.io/apimachinery/pkg/util/net/interface.go | 278 + .../k8s.io/apimachinery/pkg/util/net/port_range.go | 113 + .../k8s.io/apimachinery/pkg/util/net/port_split.go | 77 + .../k8s.io/apimachinery/pkg/util/net/util.go | 46 + .../k8s.io/apimachinery/pkg/util/rand/rand.go | 85 + .../apimachinery/pkg/util/runtime/runtime.go | 161 + .../k8s.io/apimachinery/pkg/util/sets/byte.go | 203 + .../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 + .../k8s.io/apimachinery/pkg/util/sets/empty.go | 23 + .../k8s.io/apimachinery/pkg/util/sets/int.go | 203 + 
.../k8s.io/apimachinery/pkg/util/sets/int64.go | 203 + .../k8s.io/apimachinery/pkg/util/sets/string.go | 203 + .../pkg/util/validation/field/errors.go | 254 + .../apimachinery/pkg/util/validation/field/path.go | 91 + .../apimachinery/pkg/util/validation/validation.go | 343 + .../k8s.io/apimachinery/pkg/util/wait/doc.go | 19 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 332 + .../k8s.io/apimachinery/pkg/util/yaml/decoder.go | 346 + .../vendor/k8s.io/apimachinery/pkg/version/doc.go | 19 + .../k8s.io/apimachinery/pkg/version/types.go | 37 + .../vendor/k8s.io/apimachinery/pkg/watch/doc.go | 19 + .../vendor/k8s.io/apimachinery/pkg/watch/filter.go | 109 + .../vendor/k8s.io/apimachinery/pkg/watch/mux.go | 257 + .../k8s.io/apimachinery/pkg/watch/streamwatcher.go | 119 + .../vendor/k8s.io/apimachinery/pkg/watch/until.go | 87 + .../vendor/k8s.io/apimachinery/pkg/watch/watch.go | 269 + .../forked/golang/reflect/deep_equal.go | 388 + .../third_party/forked/golang/reflect/type.go | 91 + src/prometheus/vendor/k8s.io/client-go/LICENSE | 202 + .../k8s.io/client-go/discovery/discovery_client.go | 439 + .../k8s.io/client-go/discovery/fake/discovery.go | 97 + .../vendor/k8s.io/client-go/discovery/helper.go | 162 + .../k8s.io/client-go/discovery/restmapper.go | 320 + .../k8s.io/client-go/discovery/unstructured.go | 95 + .../k8s.io/client-go/kubernetes/clientset.go | 514 + .../vendor/k8s.io/client-go/kubernetes/doc.go | 20 + .../kubernetes/fake/clientset_generated.go | 245 + .../vendor/k8s.io/client-go/kubernetes/fake/doc.go | 20 + .../k8s.io/client-go/kubernetes/fake/register.go | 68 + .../client-go/kubernetes/import_known_versions.go | 42 + .../k8s.io/client-go/kubernetes/scheme/doc.go | 20 + .../k8s.io/client-go/kubernetes/scheme/register.go | 87 + .../kubernetes/typed/apps/v1beta1/apps_client.go | 98 + .../kubernetes/typed/apps/v1beta1/deployment.go | 172 + .../client-go/kubernetes/typed/apps/v1beta1/doc.go | 20 + .../kubernetes/typed/apps/v1beta1/fake/doc.go | 20 + 
.../typed/apps/v1beta1/fake/fake_apps_client.go | 46 + .../typed/apps/v1beta1/fake/fake_deployment.go | 128 + .../typed/apps/v1beta1/fake/fake_scale.go | 23 + .../typed/apps/v1beta1/fake/fake_statefulset.go | 128 + .../typed/apps/v1beta1/generated_expansion.go | 23 + .../kubernetes/typed/apps/v1beta1/scale.go | 46 + .../kubernetes/typed/apps/v1beta1/statefulset.go | 172 + .../authentication/v1/authentication_client.go | 88 + .../kubernetes/typed/authentication/v1/doc.go | 20 + .../kubernetes/typed/authentication/v1/fake/doc.go | 20 + .../v1/fake/fake_authentication_client.go | 38 + .../authentication/v1/fake/fake_tokenreview.go | 22 + .../v1/fake/fake_tokenreview_expansion.go | 27 + .../typed/authentication/v1/generated_expansion.go | 17 + .../typed/authentication/v1/tokenreview.go | 44 + .../authentication/v1/tokenreview_expansion.go | 35 + .../v1beta1/authentication_client.go | 88 + .../kubernetes/typed/authentication/v1beta1/doc.go | 20 + .../typed/authentication/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_authentication_client.go | 38 + .../v1beta1/fake/fake_tokenreview.go | 22 + .../v1beta1/fake/fake_tokenreview_expansion.go | 27 + .../authentication/v1beta1/generated_expansion.go | 17 + .../typed/authentication/v1beta1/tokenreview.go | 44 + .../v1beta1/tokenreview_expansion.go | 35 + .../typed/authorization/v1/authorization_client.go | 98 + .../kubernetes/typed/authorization/v1/doc.go | 20 + .../kubernetes/typed/authorization/v1/fake/doc.go | 20 + .../v1/fake/fake_authorization_client.go | 46 + .../v1/fake/fake_localsubjectaccessreview.go | 23 + .../fake_localsubjectaccessreview_expansion.go | 27 + .../v1/fake/fake_selfsubjectaccessreview.go | 22 + .../fake/fake_selfsubjectaccessreview_expansion.go | 27 + .../v1/fake/fake_subjectaccessreview.go | 22 + .../v1/fake/fake_subjectaccessreview_expansion.go | 27 + .../typed/authorization/v1/generated_expansion.go | 17 + .../authorization/v1/localsubjectaccessreview.go | 46 + 
.../v1/localsubjectaccessreview_expansion.go | 36 + .../authorization/v1/selfsubjectaccessreview.go | 44 + .../v1/selfsubjectaccessreview_expansion.go | 35 + .../typed/authorization/v1/subjectaccessreview.go | 44 + .../v1/subjectaccessreview_expansion.go | 36 + .../authorization/v1beta1/authorization_client.go | 98 + .../kubernetes/typed/authorization/v1beta1/doc.go | 20 + .../typed/authorization/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_authorization_client.go | 46 + .../v1beta1/fake/fake_generated_expansion.go | 17 + .../v1beta1/fake/fake_localsubjectaccessreview.go | 23 + .../fake_localsubjectaccessreview_expansion.go | 27 + .../v1beta1/fake/fake_selfsubjectaccessreview.go | 22 + .../fake/fake_selfsubjectaccessreview_expansion.go | 27 + .../v1beta1/fake/fake_subjectaccessreview.go | 22 + .../fake/fake_subjectaccessreview_expansion.go | 27 + .../authorization/v1beta1/generated_expansion.go | 17 + .../v1beta1/localsubjectaccessreview.go | 46 + .../v1beta1/localsubjectaccessreview_expansion.go | 36 + .../v1beta1/selfsubjectaccessreview.go | 44 + .../v1beta1/selfsubjectaccessreview_expansion.go | 35 + .../authorization/v1beta1/subjectaccessreview.go | 44 + .../v1beta1/subjectaccessreview_expansion.go | 36 + .../typed/autoscaling/v1/autoscaling_client.go | 88 + .../kubernetes/typed/autoscaling/v1/doc.go | 20 + .../kubernetes/typed/autoscaling/v1/fake/doc.go | 20 + .../autoscaling/v1/fake/fake_autoscaling_client.go | 38 + .../v1/fake/fake_horizontalpodautoscaler.go | 128 + .../typed/autoscaling/v1/generated_expansion.go | 19 + .../autoscaling/v1/horizontalpodautoscaler.go | 172 + .../autoscaling/v2alpha1/autoscaling_client.go | 88 + .../kubernetes/typed/autoscaling/v2alpha1/doc.go | 20 + .../typed/autoscaling/v2alpha1/fake/doc.go | 20 + .../v2alpha1/fake/fake_autoscaling_client.go | 38 + .../v2alpha1/fake/fake_horizontalpodautoscaler.go | 128 + .../autoscaling/v2alpha1/generated_expansion.go | 19 + .../v2alpha1/horizontalpodautoscaler.go | 172 + 
.../kubernetes/typed/batch/v1/batch_client.go | 88 + .../client-go/kubernetes/typed/batch/v1/doc.go | 20 + .../kubernetes/typed/batch/v1/fake/doc.go | 20 + .../typed/batch/v1/fake/fake_batch_client.go | 38 + .../kubernetes/typed/batch/v1/fake/fake_job.go | 128 + .../typed/batch/v1/generated_expansion.go | 19 + .../client-go/kubernetes/typed/batch/v1/job.go | 172 + .../typed/batch/v2alpha1/batch_client.go | 88 + .../kubernetes/typed/batch/v2alpha1/cronjob.go | 172 + .../kubernetes/typed/batch/v2alpha1/doc.go | 20 + .../kubernetes/typed/batch/v2alpha1/fake/doc.go | 20 + .../typed/batch/v2alpha1/fake/fake_batch_client.go | 38 + .../typed/batch/v2alpha1/fake/fake_cronjob.go | 128 + .../typed/batch/v2alpha1/generated_expansion.go | 19 + .../certificates/v1beta1/certificates_client.go | 88 + .../v1beta1/certificatesigningrequest.go | 161 + .../v1beta1/certificatesigningrequest_expansion.go | 37 + .../kubernetes/typed/certificates/v1beta1/doc.go | 20 + .../typed/certificates/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_certificates_client.go | 38 + .../v1beta1/fake/fake_certificatesigningrequest.go | 119 + .../fake_certificatesigningrequest_expansion.go | 31 + .../certificates/v1beta1/generated_expansion.go | 17 + .../kubernetes/typed/core/v1/componentstatus.go | 145 + .../kubernetes/typed/core/v1/configmap.go | 155 + .../kubernetes/typed/core/v1/core_client.go | 163 + .../client-go/kubernetes/typed/core/v1/doc.go | 20 + .../kubernetes/typed/core/v1/endpoints.go | 155 + .../client-go/kubernetes/typed/core/v1/event.go | 155 + .../kubernetes/typed/core/v1/event_expansion.go | 163 + .../client-go/kubernetes/typed/core/v1/fake/doc.go | 20 + .../typed/core/v1/fake/fake_componentstatus.go | 110 + .../typed/core/v1/fake/fake_configmap.go | 118 + .../typed/core/v1/fake/fake_core_client.go | 98 + .../typed/core/v1/fake/fake_endpoints.go | 118 + .../kubernetes/typed/core/v1/fake/fake_event.go | 118 + .../typed/core/v1/fake/fake_event_expansion.go | 89 + 
.../typed/core/v1/fake/fake_limitrange.go | 118 + .../typed/core/v1/fake/fake_namespace.go | 119 + .../typed/core/v1/fake/fake_namespace_expansion.go | 37 + .../kubernetes/typed/core/v1/fake/fake_node.go | 119 + .../typed/core/v1/fake/fake_node_expansion.go | 32 + .../typed/core/v1/fake/fake_persistentvolume.go | 119 + .../core/v1/fake/fake_persistentvolumeclaim.go | 128 + .../kubernetes/typed/core/v1/fake/fake_pod.go | 128 + .../typed/core/v1/fake/fake_pod_expansion.go | 58 + .../typed/core/v1/fake/fake_podtemplate.go | 118 + .../core/v1/fake/fake_replicationcontroller.go | 128 + .../typed/core/v1/fake/fake_resourcequota.go | 128 + .../kubernetes/typed/core/v1/fake/fake_secret.go | 118 + .../kubernetes/typed/core/v1/fake/fake_service.go | 128 + .../typed/core/v1/fake/fake_service_expansion.go | 26 + .../typed/core/v1/fake/fake_serviceaccount.go | 118 + .../typed/core/v1/generated_expansion.go | 39 + .../kubernetes/typed/core/v1/limitrange.go | 155 + .../kubernetes/typed/core/v1/namespace.go | 161 + .../typed/core/v1/namespace_expansion.go | 31 + .../client-go/kubernetes/typed/core/v1/node.go | 161 + .../kubernetes/typed/core/v1/node_expansion.go | 43 + .../kubernetes/typed/core/v1/persistentvolume.go | 161 + .../typed/core/v1/persistentvolumeclaim.go | 172 + .../client-go/kubernetes/typed/core/v1/pod.go | 172 + .../kubernetes/typed/core/v1/pod_expansion.go | 45 + .../kubernetes/typed/core/v1/podtemplate.go | 155 + .../typed/core/v1/replicationcontroller.go | 172 + .../kubernetes/typed/core/v1/resourcequota.go | 172 + .../client-go/kubernetes/typed/core/v1/secret.go | 155 + .../client-go/kubernetes/typed/core/v1/service.go | 172 + .../kubernetes/typed/core/v1/service_expansion.go | 41 + .../kubernetes/typed/core/v1/serviceaccount.go | 155 + .../typed/extensions/v1beta1/daemonset.go | 172 + .../typed/extensions/v1beta1/deployment.go | 172 + .../extensions/v1beta1/deployment_expansion.go | 29 + .../kubernetes/typed/extensions/v1beta1/doc.go | 20 + 
.../typed/extensions/v1beta1/extensions_client.go | 118 + .../typed/extensions/v1beta1/fake/doc.go | 20 + .../extensions/v1beta1/fake/fake_daemonset.go | 128 + .../extensions/v1beta1/fake/fake_deployment.go | 128 + .../v1beta1/fake/fake_deployment_expansion.go | 33 + .../v1beta1/fake/fake_extensions_client.go | 62 + .../typed/extensions/v1beta1/fake/fake_ingress.go | 128 + .../v1beta1/fake/fake_podsecuritypolicy.go | 110 + .../extensions/v1beta1/fake/fake_replicaset.go | 128 + .../typed/extensions/v1beta1/fake/fake_scale.go | 23 + .../v1beta1/fake/fake_scale_expansion.go | 47 + .../v1beta1/fake/fake_thirdpartyresource.go | 110 + .../extensions/v1beta1/generated_expansion.go | 27 + .../kubernetes/typed/extensions/v1beta1/ingress.go | 172 + .../typed/extensions/v1beta1/podsecuritypolicy.go | 145 + .../typed/extensions/v1beta1/replicaset.go | 172 + .../kubernetes/typed/extensions/v1beta1/scale.go | 46 + .../typed/extensions/v1beta1/scale_expansion.go | 65 + .../typed/extensions/v1beta1/thirdpartyresource.go | 145 + .../kubernetes/typed/policy/v1beta1/doc.go | 20 + .../kubernetes/typed/policy/v1beta1/eviction.go | 46 + .../typed/policy/v1beta1/eviction_expansion.go | 38 + .../kubernetes/typed/policy/v1beta1/fake/doc.go | 20 + .../typed/policy/v1beta1/fake/fake_eviction.go | 23 + .../policy/v1beta1/fake/fake_eviction_expansion.go | 33 + .../v1beta1/fake/fake_poddisruptionbudget.go | 128 + .../policy/v1beta1/fake/fake_policy_client.go | 42 + .../typed/policy/v1beta1/generated_expansion.go | 19 + .../typed/policy/v1beta1/poddisruptionbudget.go | 172 + .../typed/policy/v1beta1/policy_client.go | 93 + .../kubernetes/typed/rbac/v1alpha1/clusterrole.go | 145 + .../typed/rbac/v1alpha1/clusterrolebinding.go | 145 + .../kubernetes/typed/rbac/v1alpha1/doc.go | 20 + .../kubernetes/typed/rbac/v1alpha1/fake/doc.go | 20 + .../typed/rbac/v1alpha1/fake/fake_clusterrole.go | 110 + .../rbac/v1alpha1/fake/fake_clusterrolebinding.go | 110 + .../typed/rbac/v1alpha1/fake/fake_rbac_client.go 
| 50 + .../typed/rbac/v1alpha1/fake/fake_role.go | 118 + .../typed/rbac/v1alpha1/fake/fake_rolebinding.go | 118 + .../typed/rbac/v1alpha1/generated_expansion.go | 25 + .../kubernetes/typed/rbac/v1alpha1/rbac_client.go | 103 + .../kubernetes/typed/rbac/v1alpha1/role.go | 155 + .../kubernetes/typed/rbac/v1alpha1/rolebinding.go | 155 + .../kubernetes/typed/rbac/v1beta1/clusterrole.go | 145 + .../typed/rbac/v1beta1/clusterrolebinding.go | 145 + .../client-go/kubernetes/typed/rbac/v1beta1/doc.go | 20 + .../kubernetes/typed/rbac/v1beta1/fake/doc.go | 20 + .../typed/rbac/v1beta1/fake/fake_clusterrole.go | 110 + .../rbac/v1beta1/fake/fake_clusterrolebinding.go | 110 + .../typed/rbac/v1beta1/fake/fake_rbac_client.go | 50 + .../typed/rbac/v1beta1/fake/fake_role.go | 118 + .../typed/rbac/v1beta1/fake/fake_rolebinding.go | 118 + .../typed/rbac/v1beta1/generated_expansion.go | 25 + .../kubernetes/typed/rbac/v1beta1/rbac_client.go | 103 + .../kubernetes/typed/rbac/v1beta1/role.go | 155 + .../kubernetes/typed/rbac/v1beta1/rolebinding.go | 155 + .../kubernetes/typed/settings/v1alpha1/doc.go | 20 + .../kubernetes/typed/settings/v1alpha1/fake/doc.go | 20 + .../typed/settings/v1alpha1/fake/fake_podpreset.go | 118 + .../settings/v1alpha1/fake/fake_settings_client.go | 38 + .../typed/settings/v1alpha1/generated_expansion.go | 19 + .../typed/settings/v1alpha1/podpreset.go | 155 + .../typed/settings/v1alpha1/settings_client.go | 88 + .../client-go/kubernetes/typed/storage/v1/doc.go | 20 + .../kubernetes/typed/storage/v1/fake/doc.go | 20 + .../typed/storage/v1/fake/fake_storage_client.go | 38 + .../typed/storage/v1/fake/fake_storageclass.go | 110 + .../typed/storage/v1/generated_expansion.go | 19 + .../kubernetes/typed/storage/v1/storage_client.go | 88 + .../kubernetes/typed/storage/v1/storageclass.go | 145 + .../kubernetes/typed/storage/v1beta1/doc.go | 20 + .../kubernetes/typed/storage/v1beta1/fake/doc.go | 20 + .../storage/v1beta1/fake/fake_storage_client.go | 38 + 
.../storage/v1beta1/fake/fake_storageclass.go | 110 + .../typed/storage/v1beta1/generated_expansion.go | 19 + .../typed/storage/v1beta1/storage_client.go | 88 + .../typed/storage/v1beta1/storageclass.go | 145 + .../vendor/k8s.io/client-go/pkg/api/OWNERS | 44 + .../vendor/k8s.io/client-go/pkg/api/defaults.go | 36 + .../vendor/k8s.io/client-go/pkg/api/doc.go | 22 + .../k8s.io/client-go/pkg/api/field_constants.go | 38 + .../vendor/k8s.io/client-go/pkg/api/helpers.go | 691 + .../vendor/k8s.io/client-go/pkg/api/install/OWNERS | 11 + .../k8s.io/client-go/pkg/api/install/install.go | 70 + .../vendor/k8s.io/client-go/pkg/api/json.go | 28 + .../vendor/k8s.io/client-go/pkg/api/ref.go | 132 + .../vendor/k8s.io/client-go/pkg/api/register.go | 135 + .../k8s.io/client-go/pkg/api/resource_helpers.go | 229 + .../vendor/k8s.io/client-go/pkg/api/types.go | 3822 + .../vendor/k8s.io/client-go/pkg/api/v1/OWNERS | 41 + .../k8s.io/client-go/pkg/api/v1/conversion.go | 785 + .../vendor/k8s.io/client-go/pkg/api/v1/defaults.go | 389 + .../vendor/k8s.io/client-go/pkg/api/v1/doc.go | 18 + .../vendor/k8s.io/client-go/pkg/api/v1/generate.go | 64 + .../k8s.io/client-go/pkg/api/v1/generated.pb.go | 43238 ++++++++++ .../k8s.io/client-go/pkg/api/v1/generated.proto | 3922 + .../vendor/k8s.io/client-go/pkg/api/v1/helpers.go | 632 + .../vendor/k8s.io/client-go/pkg/api/v1/meta.go | 98 + .../vendor/k8s.io/client-go/pkg/api/v1/ref.go | 133 + .../vendor/k8s.io/client-go/pkg/api/v1/register.go | 96 + .../client-go/pkg/api/v1/resource_helpers.go | 257 + .../k8s.io/client-go/pkg/api/v1/types.generated.go | 73800 ++++++++++++++++ .../vendor/k8s.io/client-go/pkg/api/v1/types.go | 4380 + .../pkg/api/v1/types_swagger_doc_generated.go | 1960 + .../pkg/api/v1/zz_generated.conversion.go | 4702 + .../client-go/pkg/api/v1/zz_generated.deepcopy.go | 3500 + .../client-go/pkg/api/v1/zz_generated.defaults.go | 631 + .../client-go/pkg/api/zz_generated.deepcopy.go | 3527 + .../vendor/k8s.io/client-go/pkg/apis/apps/OWNERS | 
21 + .../vendor/k8s.io/client-go/pkg/apis/apps/doc.go | 17 + .../client-go/pkg/apis/apps/install/install.go | 49 + .../k8s.io/client-go/pkg/apis/apps/register.go | 58 + .../vendor/k8s.io/client-go/pkg/apis/apps/types.go | 103 + .../client-go/pkg/apis/apps/v1beta1/conversion.go | 297 + .../client-go/pkg/apis/apps/v1beta1/defaults.go | 103 + .../k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go | 17 + .../pkg/apis/apps/v1beta1/generated.pb.go | 3939 + .../pkg/apis/apps/v1beta1/generated.proto | 342 + .../client-go/pkg/apis/apps/v1beta1/register.go | 53 + .../pkg/apis/apps/v1beta1/types.generated.go | 6485 ++ .../client-go/pkg/apis/apps/v1beta1/types.go | 375 + .../apps/v1beta1/types_swagger_doc_generated.go | 208 + .../apis/apps/v1beta1/zz_generated.conversion.go | 166 + .../pkg/apis/apps/v1beta1/zz_generated.deepcopy.go | 355 + .../pkg/apis/apps/v1beta1/zz_generated.defaults.go | 326 + .../pkg/apis/apps/zz_generated.deepcopy.go | 125 + .../client-go/pkg/apis/authentication/OWNERS | 9 + .../client-go/pkg/apis/authentication/doc.go | 18 + .../pkg/apis/authentication/install/install.go | 53 + .../client-go/pkg/apis/authentication/register.go | 50 + .../client-go/pkg/apis/authentication/types.go | 89 + .../pkg/apis/authentication/v1/conversion.go | 26 + .../pkg/apis/authentication/v1/defaults.go | 25 + .../client-go/pkg/apis/authentication/v1/doc.go | 18 + .../pkg/apis/authentication/v1/generated.pb.go | 1281 + .../pkg/apis/authentication/v1/generated.proto | 100 + .../pkg/apis/authentication/v1/register.go | 48 + .../client-go/pkg/apis/authentication/v1/types.go | 91 + .../v1/types_swagger_doc_generated.go | 72 + .../authentication/v1/zz_generated.conversion.go | 145 + .../authentication/v1/zz_generated.deepcopy.go | 106 + .../authentication/v1/zz_generated.defaults.go | 32 + .../pkg/apis/authentication/v1beta1/conversion.go | 26 + .../pkg/apis/authentication/v1beta1/defaults.go | 25 + .../pkg/apis/authentication/v1beta1/doc.go | 18 + 
.../apis/authentication/v1beta1/generated.pb.go | 1282 + .../apis/authentication/v1beta1/generated.proto | 101 + .../pkg/apis/authentication/v1beta1/register.go | 48 + .../apis/authentication/v1beta1/types.generated.go | 1568 + .../pkg/apis/authentication/v1beta1/types.go | 91 + .../v1beta1/types_swagger_doc_generated.go | 72 + .../v1beta1/zz_generated.conversion.go | 145 + .../v1beta1/zz_generated.deepcopy.go | 106 + .../v1beta1/zz_generated.defaults.go | 32 + .../apis/authentication/zz_generated.deepcopy.go | 106 + .../k8s.io/client-go/pkg/apis/authorization/OWNERS | 17 + .../k8s.io/client-go/pkg/apis/authorization/doc.go | 18 + .../pkg/apis/authorization/install/install.go | 53 + .../client-go/pkg/apis/authorization/register.go | 52 + .../client-go/pkg/apis/authorization/types.go | 146 + .../pkg/apis/authorization/v1/conversion.go | 26 + .../pkg/apis/authorization/v1/defaults.go | 25 + .../client-go/pkg/apis/authorization/v1/doc.go | 18 + .../pkg/apis/authorization/v1/generated.pb.go | 2344 + .../pkg/apis/authorization/v1/generated.proto | 185 + .../pkg/apis/authorization/v1/register.go | 55 + .../pkg/apis/authorization/v1/types.generated.go | 3233 + .../client-go/pkg/apis/authorization/v1/types.go | 176 + .../v1/types_swagger_doc_generated.go | 119 + .../authorization/v1/zz_generated.conversion.go | 263 + .../apis/authorization/v1/zz_generated.deepcopy.go | 179 + .../apis/authorization/v1/zz_generated.defaults.go | 32 + .../pkg/apis/authorization/v1beta1/conversion.go | 26 + .../pkg/apis/authorization/v1beta1/defaults.go | 25 + .../pkg/apis/authorization/v1beta1/doc.go | 18 + .../pkg/apis/authorization/v1beta1/generated.pb.go | 2344 + .../pkg/apis/authorization/v1beta1/generated.proto | 185 + .../pkg/apis/authorization/v1beta1/register.go | 55 + .../apis/authorization/v1beta1/types.generated.go | 3233 + .../pkg/apis/authorization/v1beta1/types.go | 176 + .../v1beta1/types_swagger_doc_generated.go | 119 + .../v1beta1/zz_generated.conversion.go | 263 + 
.../authorization/v1beta1/zz_generated.deepcopy.go | 179 + .../authorization/v1beta1/zz_generated.defaults.go | 32 + .../apis/authorization/zz_generated.deepcopy.go | 179 + .../k8s.io/client-go/pkg/apis/autoscaling/OWNERS | 20 + .../client-go/pkg/apis/autoscaling/annotations.go | 30 + .../k8s.io/client-go/pkg/apis/autoscaling/doc.go | 17 + .../pkg/apis/autoscaling/install/install.go | 51 + .../client-go/pkg/apis/autoscaling/register.go | 53 + .../k8s.io/client-go/pkg/apis/autoscaling/types.go | 305 + .../pkg/apis/autoscaling/v1/conversion.go | 244 + .../client-go/pkg/apis/autoscaling/v1/defaults.go | 38 + .../client-go/pkg/apis/autoscaling/v1/doc.go | 17 + .../pkg/apis/autoscaling/v1/generated.pb.go | 3498 + .../pkg/apis/autoscaling/v1/generated.proto | 298 + .../client-go/pkg/apis/autoscaling/v1/register.go | 50 + .../pkg/apis/autoscaling/v1/types.generated.go | 5216 ++ .../client-go/pkg/apis/autoscaling/v1/types.go | 296 + .../autoscaling/v1/types_swagger_doc_generated.go | 205 + .../apis/autoscaling/v1/zz_generated.conversion.go | 449 + .../apis/autoscaling/v1/zz_generated.deepcopy.go | 312 + .../apis/autoscaling/v1/zz_generated.defaults.go | 47 + .../pkg/apis/autoscaling/v2alpha1/defaults.go | 49 + .../client-go/pkg/apis/autoscaling/v2alpha1/doc.go | 17 + .../pkg/apis/autoscaling/v2alpha1/generated.pb.go | 3062 + .../pkg/apis/autoscaling/v2alpha1/generated.proto | 275 + .../pkg/apis/autoscaling/v2alpha1/register.go | 44 + .../apis/autoscaling/v2alpha1/types.generated.go | 4621 + .../pkg/apis/autoscaling/v2alpha1/types.go | 269 + .../v2alpha1/types_swagger_doc_generated.go | 175 + .../v2alpha1/zz_generated.conversion.go | 387 + .../autoscaling/v2alpha1/zz_generated.deepcopy.go | 285 + .../pkg/apis/autoscaling/zz_generated.deepcopy.go | 320 + .../vendor/k8s.io/client-go/pkg/apis/batch/OWNERS | 19 + .../vendor/k8s.io/client-go/pkg/apis/batch/doc.go | 17 + .../client-go/pkg/apis/batch/install/install.go | 51 + .../k8s.io/client-go/pkg/apis/batch/register.go | 57 + 
.../k8s.io/client-go/pkg/apis/batch/types.go | 286 + .../client-go/pkg/apis/batch/v1/conversion.go | 84 + .../k8s.io/client-go/pkg/apis/batch/v1/defaults.go | 47 + .../k8s.io/client-go/pkg/apis/batch/v1/doc.go | 17 + .../client-go/pkg/apis/batch/v1/generated.pb.go | 1580 + .../client-go/pkg/apis/batch/v1/generated.proto | 168 + .../k8s.io/client-go/pkg/apis/batch/v1/register.go | 49 + .../client-go/pkg/apis/batch/v1/types.generated.go | 2681 + .../k8s.io/client-go/pkg/apis/batch/v1/types.go | 168 + .../apis/batch/v1/types_swagger_doc_generated.go | 93 + .../pkg/apis/batch/v1/zz_generated.conversion.go | 202 + .../pkg/apis/batch/v1/zz_generated.deepcopy.go | 162 + .../pkg/apis/batch/v1/zz_generated.defaults.go | 176 + .../pkg/apis/batch/v2alpha1/conversion.go | 44 + .../client-go/pkg/apis/batch/v2alpha1/defaults.go | 37 + .../client-go/pkg/apis/batch/v2alpha1/doc.go | 17 + .../pkg/apis/batch/v2alpha1/generated.pb.go | 1505 + .../pkg/apis/batch/v2alpha1/generated.proto | 134 + .../client-go/pkg/apis/batch/v2alpha1/register.go | 52 + .../pkg/apis/batch/v2alpha1/types.generated.go | 2525 + .../client-go/pkg/apis/batch/v2alpha1/types.go | 147 + .../batch/v2alpha1/types_swagger_doc_generated.go | 96 + .../apis/batch/v2alpha1/zz_generated.conversion.go | 227 + .../apis/batch/v2alpha1/zz_generated.deepcopy.go | 170 + .../apis/batch/v2alpha1/zz_generated.defaults.go | 310 + .../pkg/apis/batch/zz_generated.deepcopy.go | 291 + .../k8s.io/client-go/pkg/apis/certificates/OWNERS | 14 + .../k8s.io/client-go/pkg/apis/certificates/doc.go | 18 + .../client-go/pkg/apis/certificates/helpers.go | 38 + .../pkg/apis/certificates/install/install.go | 51 + .../client-go/pkg/apis/certificates/register.go | 55 + .../client-go/pkg/apis/certificates/types.go | 143 + .../pkg/apis/certificates/v1beta1/conversion.go | 38 + .../pkg/apis/certificates/v1beta1/defaults.go | 31 + .../client-go/pkg/apis/certificates/v1beta1/doc.go | 18 + .../pkg/apis/certificates/v1beta1/generated.pb.go | 1674 + 
.../pkg/apis/certificates/v1beta1/generated.proto | 124 + .../pkg/apis/certificates/v1beta1/helpers.go | 38 + .../pkg/apis/certificates/v1beta1/register.go | 59 + .../apis/certificates/v1beta1/types.generated.go | 2624 + .../pkg/apis/certificates/v1beta1/types.go | 152 + .../v1beta1/types_swagger_doc_generated.go | 74 + .../v1beta1/zz_generated.conversion.go | 179 + .../certificates/v1beta1/zz_generated.deepcopy.go | 150 + .../certificates/v1beta1/zz_generated.defaults.go | 47 + .../pkg/apis/certificates/zz_generated.deepcopy.go | 150 + .../k8s.io/client-go/pkg/apis/extensions/OWNERS | 41 + .../k8s.io/client-go/pkg/apis/extensions/doc.go | 17 + .../client-go/pkg/apis/extensions/helpers.go | 37 + .../pkg/apis/extensions/install/install.go | 51 + .../client-go/pkg/apis/extensions/register.go | 70 + .../k8s.io/client-go/pkg/apis/extensions/types.go | 1124 + .../pkg/apis/extensions/v1beta1/conversion.go | 262 + .../pkg/apis/extensions/v1beta1/defaults.go | 138 + .../client-go/pkg/apis/extensions/v1beta1/doc.go | 17 + .../pkg/apis/extensions/v1beta1/generated.pb.go | 11993 +++ .../pkg/apis/extensions/v1beta1/generated.proto | 1005 + .../pkg/apis/extensions/v1beta1/register.go | 67 + .../pkg/apis/extensions/v1beta1/types.generated.go | 21745 +++++ .../client-go/pkg/apis/extensions/v1beta1/types.go | 1147 + .../v1beta1/types_swagger_doc_generated.go | 627 + .../extensions/v1beta1/zz_generated.conversion.go | 1628 + .../extensions/v1beta1/zz_generated.deepcopy.go | 1087 + .../extensions/v1beta1/zz_generated.defaults.go | 475 + .../pkg/apis/extensions/zz_generated.deepcopy.go | 1059 + .../vendor/k8s.io/client-go/pkg/apis/policy/OWNERS | 14 + .../vendor/k8s.io/client-go/pkg/apis/policy/doc.go | 17 + .../client-go/pkg/apis/policy/install/install.go | 49 + .../k8s.io/client-go/pkg/apis/policy/register.go | 54 + .../k8s.io/client-go/pkg/apis/policy/types.go | 113 + .../client-go/pkg/apis/policy/v1beta1/doc.go | 20 + .../pkg/apis/policy/v1beta1/generated.pb.go | 1375 + 
.../pkg/apis/policy/v1beta1/generated.proto | 109 + .../client-go/pkg/apis/policy/v1beta1/register.go | 51 + .../pkg/apis/policy/v1beta1/types.generated.go | 2203 + .../client-go/pkg/apis/policy/v1beta1/types.go | 105 + .../policy/v1beta1/types_swagger_doc_generated.go | 82 + .../apis/policy/v1beta1/zz_generated.conversion.go | 172 + .../apis/policy/v1beta1/zz_generated.deepcopy.go | 137 + .../pkg/apis/policy/zz_generated.deepcopy.go | 137 + .../vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS | 17 + .../vendor/k8s.io/client-go/pkg/apis/rbac/doc.go | 18 + .../k8s.io/client-go/pkg/apis/rbac/helpers.go | 340 + .../client-go/pkg/apis/rbac/install/install.go | 53 + .../k8s.io/client-go/pkg/apis/rbac/register.go | 58 + .../vendor/k8s.io/client-go/pkg/apis/rbac/types.go | 188 + .../client-go/pkg/apis/rbac/v1alpha1/conversion.go | 81 + .../client-go/pkg/apis/rbac/v1alpha1/defaults.go | 53 + .../k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go | 18 + .../pkg/apis/rbac/v1alpha1/generated.pb.go | 2817 + .../pkg/apis/rbac/v1alpha1/generated.proto | 202 + .../client-go/pkg/apis/rbac/v1alpha1/helpers.go | 146 + .../client-go/pkg/apis/rbac/v1alpha1/register.go | 55 + .../pkg/apis/rbac/v1alpha1/types.generated.go | 4879 ++ .../client-go/pkg/apis/rbac/v1alpha1/types.go | 209 + .../rbac/v1alpha1/types_swagger_doc_generated.go | 148 + .../apis/rbac/v1alpha1/zz_generated.conversion.go | 445 + .../apis/rbac/v1alpha1/zz_generated.deepcopy.go | 258 + .../apis/rbac/v1alpha1/zz_generated.defaults.go | 66 + .../client-go/pkg/apis/rbac/v1beta1/defaults.go | 53 + .../k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go | 18 + .../pkg/apis/rbac/v1beta1/generated.pb.go | 2816 + .../pkg/apis/rbac/v1beta1/generated.proto | 200 + .../client-go/pkg/apis/rbac/v1beta1/helpers.go | 146 + .../client-go/pkg/apis/rbac/v1beta1/register.go | 55 + .../pkg/apis/rbac/v1beta1/types.generated.go | 4879 ++ .../client-go/pkg/apis/rbac/v1beta1/types.go | 207 + .../rbac/v1beta1/types_swagger_doc_generated.go | 148 + 
.../apis/rbac/v1beta1/zz_generated.conversion.go | 389 + .../pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go | 258 + .../pkg/apis/rbac/v1beta1/zz_generated.defaults.go | 66 + .../pkg/apis/rbac/zz_generated.deepcopy.go | 258 + .../k8s.io/client-go/pkg/apis/settings/doc.go | 18 + .../client-go/pkg/apis/settings/install/install.go | 49 + .../k8s.io/client-go/pkg/apis/settings/register.go | 52 + .../k8s.io/client-go/pkg/apis/settings/types.go | 63 + .../client-go/pkg/apis/settings/v1alpha1/doc.go | 18 + .../pkg/apis/settings/v1alpha1/generated.pb.go | 924 + .../pkg/apis/settings/v1alpha1/generated.proto | 76 + .../pkg/apis/settings/v1alpha1/register.go | 49 + .../client-go/pkg/apis/settings/v1alpha1/types.go | 67 + .../v1alpha1/types_swagger_doc_generated.go | 61 + .../settings/v1alpha1/zz_generated.conversion.go | 159 + .../settings/v1alpha1/zz_generated.deepcopy.go | 124 + .../settings/v1alpha1/zz_generated.defaults.go | 98 + .../pkg/apis/settings/zz_generated.deepcopy.go | 124 + .../k8s.io/client-go/pkg/apis/storage/OWNERS | 3 + .../k8s.io/client-go/pkg/apis/storage/doc.go | 18 + .../client-go/pkg/apis/storage/install/install.go | 54 + .../k8s.io/client-go/pkg/apis/storage/register.go | 51 + .../k8s.io/client-go/pkg/apis/storage/types.go | 60 + .../k8s.io/client-go/pkg/apis/storage/v1/doc.go | 18 + .../client-go/pkg/apis/storage/v1/generated.pb.go | 730 + .../client-go/pkg/apis/storage/v1/generated.proto | 63 + .../client-go/pkg/apis/storage/v1/register.go | 50 + .../k8s.io/client-go/pkg/apis/storage/v1/types.go | 57 + .../apis/storage/v1/types_swagger_doc_generated.go | 51 + .../pkg/apis/storage/v1/zz_generated.conversion.go | 89 + .../pkg/apis/storage/v1/zz_generated.deepcopy.go | 80 + .../pkg/apis/storage/v1/zz_generated.defaults.go | 32 + .../client-go/pkg/apis/storage/v1beta1/doc.go | 18 + .../pkg/apis/storage/v1beta1/generated.pb.go | 731 + .../pkg/apis/storage/v1beta1/generated.proto | 64 + .../client-go/pkg/apis/storage/v1beta1/register.go | 50 + 
.../pkg/apis/storage/v1beta1/types.generated.go | 985 + .../client-go/pkg/apis/storage/v1beta1/types.go | 57 + .../storage/v1beta1/types_swagger_doc_generated.go | 51 + .../storage/v1beta1/zz_generated.conversion.go | 89 + .../apis/storage/v1beta1/zz_generated.deepcopy.go | 80 + .../apis/storage/v1beta1/zz_generated.defaults.go | 32 + .../pkg/apis/storage/zz_generated.deepcopy.go | 80 + .../vendor/k8s.io/client-go/pkg/util/doc.go | 20 + .../k8s.io/client-go/pkg/util/parsers/parsers.go | 54 + .../vendor/k8s.io/client-go/pkg/util/template.go | 48 + .../vendor/k8s.io/client-go/pkg/util/umask.go | 27 + .../k8s.io/client-go/pkg/util/umask_windows.go | 27 + .../vendor/k8s.io/client-go/pkg/util/util.go | 131 + .../vendor/k8s.io/client-go/pkg/version/base.go | 59 + .../vendor/k8s.io/client-go/pkg/version/doc.go | 19 + .../vendor/k8s.io/client-go/pkg/version/version.go | 42 + src/prometheus/vendor/k8s.io/client-go/rest/OWNERS | 24 + .../vendor/k8s.io/client-go/rest/client.go | 258 + .../vendor/k8s.io/client-go/rest/config.go | 384 + .../vendor/k8s.io/client-go/rest/plugin.go | 73 + .../vendor/k8s.io/client-go/rest/request.go | 1247 + .../vendor/k8s.io/client-go/rest/transport.go | 99 + .../vendor/k8s.io/client-go/rest/url_utils.go | 90 + .../vendor/k8s.io/client-go/rest/urlbackoff.go | 107 + .../vendor/k8s.io/client-go/rest/versions.go | 88 + .../vendor/k8s.io/client-go/rest/watch/decoder.go | 72 + .../vendor/k8s.io/client-go/rest/watch/encoder.go | 56 + .../vendor/k8s.io/client-go/testing/actions.go | 471 + .../vendor/k8s.io/client-go/testing/fake.go | 259 + .../vendor/k8s.io/client-go/testing/fixture.go | 518 + .../vendor/k8s.io/client-go/tools/cache/OWNERS | 41 + .../k8s.io/client-go/tools/cache/controller.go | 349 + .../k8s.io/client-go/tools/cache/delta_fifo.go | 681 + .../vendor/k8s.io/client-go/tools/cache/doc.go | 24 + .../client-go/tools/cache/expiration_cache.go | 208 + .../tools/cache/expiration_cache_fakes.go | 54 + .../client-go/tools/cache/fake_custom_store.go 
| 102 + .../vendor/k8s.io/client-go/tools/cache/fifo.go | 358 + .../vendor/k8s.io/client-go/tools/cache/index.go | 85 + .../vendor/k8s.io/client-go/tools/cache/listers.go | 160 + .../k8s.io/client-go/tools/cache/listwatch.go | 162 + .../client-go/tools/cache/mutation_detector.go | 135 + .../k8s.io/client-go/tools/cache/reflector.go | 421 + .../client-go/tools/cache/shared_informer.go | 581 + .../vendor/k8s.io/client-go/tools/cache/store.go | 240 + .../client-go/tools/cache/thread_safe_store.go | 288 + .../k8s.io/client-go/tools/cache/undelta_store.go | 83 + .../client-go/tools/clientcmd/api/helpers.go | 183 + .../client-go/tools/clientcmd/api/register.go | 46 + .../k8s.io/client-go/tools/clientcmd/api/types.go | 178 + .../vendor/k8s.io/client-go/tools/metrics/OWNERS | 7 + .../k8s.io/client-go/tools/metrics/metrics.go | 61 + .../vendor/k8s.io/client-go/transport/OWNERS | 7 + .../vendor/k8s.io/client-go/transport/cache.go | 88 + .../vendor/k8s.io/client-go/transport/config.go | 95 + .../k8s.io/client-go/transport/round_trippers.go | 436 + .../vendor/k8s.io/client-go/transport/transport.go | 141 + .../vendor/k8s.io/client-go/util/cert/cert.go | 215 + .../vendor/k8s.io/client-go/util/cert/csr.go | 75 + .../vendor/k8s.io/client-go/util/cert/io.go | 150 + .../vendor/k8s.io/client-go/util/cert/pem.go | 138 + .../vendor/k8s.io/client-go/util/clock/clock.go | 327 + .../k8s.io/client-go/util/flowcontrol/backoff.go | 149 + .../k8s.io/client-go/util/flowcontrol/throttle.go | 132 + .../k8s.io/client-go/util/integer/integer.go | 67 + .../util/workqueue/default_rate_limiters.go | 211 + .../client-go/util/workqueue/delaying_queue.go | 246 + .../vendor/k8s.io/client-go/util/workqueue/doc.go | 26 + .../k8s.io/client-go/util/workqueue/metrics.go | 195 + .../client-go/util/workqueue/parallelizer.go | 52 + .../k8s.io/client-go/util/workqueue/queue.go | 172 + .../util/workqueue/rate_limitting_queue.go | 69 + .../k8s.io/client-go/util/workqueue/timed_queue.go | 52 + 
src/prometheus/vendor/vendor.json | 2170 + src/prometheus/web/api/v1/api.go | 865 + src/prometheus/web/api/v1/api_test.go | 1084 + src/prometheus/web/api/v2/api.go | 247 + src/prometheus/web/federate.go | 224 + src/prometheus/web/federate_test.go | 241 + src/prometheus/web/ui/README.md | 12 + src/prometheus/web/ui/bindata.go | 1266 + src/prometheus/web/ui/static/css/alerts.css | 29 + src/prometheus/web/ui/static/css/graph.css | 198 + src/prometheus/web/ui/static/css/prom_console.css | 170 + src/prometheus/web/ui/static/css/prometheus.css | 26 + src/prometheus/web/ui/static/css/rules.css | 12 + src/prometheus/web/ui/static/css/targets.css | 42 + src/prometheus/web/ui/static/img/ajax-loader.gif | Bin 0 -> 847 bytes src/prometheus/web/ui/static/img/favicon.ico | Bin 0 -> 15086 bytes src/prometheus/web/ui/static/js/alerts.js | 31 + .../ui/static/js/graph/graph_template.handlebar | 133 + src/prometheus/web/ui/static/js/graph/index.js | 1105 + src/prometheus/web/ui/static/js/prom_console.js | 673 + src/prometheus/web/ui/static/js/targets.js | 54 + .../vendor/bootstrap-3.3.1/css/bootstrap-theme.css | 470 + .../bootstrap-3.3.1/css/bootstrap-theme.css.map | 1 + .../bootstrap-3.3.1/css/bootstrap-theme.min.css | 5 + .../vendor/bootstrap-3.3.1/css/bootstrap.css | 6332 ++ .../vendor/bootstrap-3.3.1/css/bootstrap.css.map | 1 + .../vendor/bootstrap-3.3.1/css/bootstrap.min.css | 5 + .../fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20335 bytes .../fonts/glyphicons-halflings-regular.svg | 229 + .../fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 41280 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23320 bytes .../static/vendor/bootstrap-3.3.1/js/bootstrap.js | 2320 + .../vendor/bootstrap-3.3.1/js/bootstrap.min.js | 7 + .../web/ui/static/vendor/bootstrap-3.3.1/js/npm.js | 13 + .../bootstrap3-typeahead.min.js | 21 + .../bootstrap-datetimepicker.min.css | 5 + .../bootstrap-datetimepicker.min.js | 217 + src/prometheus/web/ui/static/vendor/fuzzy/fuzzy.js | 182 + 
.../web/ui/static/vendor/js/jquery.hotkeys.js | 113 + .../web/ui/static/vendor/js/jquery.min.js | 4 + .../web/ui/static/vendor/js/jquery.selection.js | 354 + .../vendor/moment/moment-timezone-with-data.min.js | 7 + .../web/ui/static/vendor/moment/moment.min.js | 551 + .../web/ui/static/vendor/mustache/mustache.min.js | 1 + .../web/ui/static/vendor/rickshaw/rickshaw.min.css | 1 + .../web/ui/static/vendor/rickshaw/rickshaw.min.js | 3 + .../static/vendor/rickshaw/vendor/d3.layout.min.js | 1 + .../web/ui/static/vendor/rickshaw/vendor/d3.v3.js | 5 + src/prometheus/web/ui/templates/_base.html | 65 + src/prometheus/web/ui/templates/alerts.html | 75 + src/prometheus/web/ui/templates/config.html | 8 + src/prometheus/web/ui/templates/flags.html | 17 + src/prometheus/web/ui/templates/graph.html | 36 + src/prometheus/web/ui/templates/rules.html | 37 + .../web/ui/templates/service-discovery.html | 93 + src/prometheus/web/ui/templates/status.html | 97 + src/prometheus/web/ui/templates/targets.html | 79 + src/prometheus/web/web.go | 912 + src/prometheus/web/web_test.go | 304 + 2822 files changed, 1115325 insertions(+) create mode 100644 src/prometheus/.circleci/config.yml create mode 100644 src/prometheus/.dockerignore create mode 100644 src/prometheus/.github/ISSUE_TEMPLATE.md create mode 100644 src/prometheus/.gitignore create mode 100644 src/prometheus/.promu.yml create mode 100644 src/prometheus/.travis.yml create mode 100644 src/prometheus/CHANGELOG.md create mode 100644 src/prometheus/CONTRIBUTING.md create mode 100644 src/prometheus/Dockerfile create mode 100644 src/prometheus/LICENSE create mode 100644 src/prometheus/MAINTAINERS.md create mode 100644 src/prometheus/Makefile create mode 100644 src/prometheus/Makefile.common create mode 100644 src/prometheus/NOTICE create mode 100644 src/prometheus/README.md create mode 100644 src/prometheus/VERSION create mode 100644 src/prometheus/cmd/prometheus/fdlimits_default.go create mode 100644 
src/prometheus/cmd/prometheus/fdlimits_windows.go create mode 100644 src/prometheus/cmd/prometheus/main.go create mode 100644 src/prometheus/cmd/prometheus/main_test.go create mode 100644 src/prometheus/cmd/prometheus/uname_default.go create mode 100644 src/prometheus/cmd/prometheus/uname_linux.go create mode 100644 src/prometheus/cmd/prometheus/uname_linux_int8.go create mode 100644 src/prometheus/cmd/prometheus/uname_linux_uint8.go create mode 100644 src/prometheus/cmd/promtool/main.go create mode 100644 src/prometheus/code-of-conduct.md create mode 100644 src/prometheus/config/config.go create mode 100644 src/prometheus/config/config_default_test.go create mode 100644 src/prometheus/config/config_test.go create mode 100644 src/prometheus/config/config_windows_test.go create mode 100644 src/prometheus/config/testdata/bearertoken.bad.yml create mode 100644 src/prometheus/config/testdata/bearertoken_basicauth.bad.yml create mode 100644 src/prometheus/config/testdata/conf.good.yml create mode 100644 src/prometheus/config/testdata/ec2_filters_empty_values.bad.yml create mode 100644 src/prometheus/config/testdata/first.rules create mode 100644 src/prometheus/config/testdata/global_timeout.good.yml create mode 100644 src/prometheus/config/testdata/jobname.bad.yml create mode 100644 src/prometheus/config/testdata/jobname_dup.bad.yml create mode 100644 src/prometheus/config/testdata/kubernetes_bearertoken.bad.yml create mode 100644 src/prometheus/config/testdata/kubernetes_bearertoken_basicauth.bad.yml create mode 100644 src/prometheus/config/testdata/kubernetes_namespace_discovery.bad.yml create mode 100644 src/prometheus/config/testdata/kubernetes_role.bad.yml create mode 100644 src/prometheus/config/testdata/labeldrop.bad.yml create mode 100644 src/prometheus/config/testdata/labeldrop2.bad.yml create mode 100644 src/prometheus/config/testdata/labeldrop3.bad.yml create mode 100644 src/prometheus/config/testdata/labeldrop4.bad.yml create mode 100644 
src/prometheus/config/testdata/labeldrop5.bad.yml create mode 100644 src/prometheus/config/testdata/labelkeep.bad.yml create mode 100644 src/prometheus/config/testdata/labelkeep2.bad.yml create mode 100644 src/prometheus/config/testdata/labelkeep3.bad.yml create mode 100644 src/prometheus/config/testdata/labelkeep4.bad.yml create mode 100644 src/prometheus/config/testdata/labelkeep5.bad.yml create mode 100644 src/prometheus/config/testdata/labelmap.bad.yml create mode 100644 src/prometheus/config/testdata/labelname.bad.yml create mode 100644 src/prometheus/config/testdata/labelname2.bad.yml create mode 100644 src/prometheus/config/testdata/marathon_authtoken_authtokenfile.bad.yml create mode 100644 src/prometheus/config/testdata/marathon_authtoken_basicauth.bad.yml create mode 100644 src/prometheus/config/testdata/marathon_authtoken_bearertoken.bad.yml create mode 100644 src/prometheus/config/testdata/marathon_no_servers.bad.yml create mode 100644 src/prometheus/config/testdata/modulus_missing.bad.yml create mode 100644 src/prometheus/config/testdata/regex.bad.yml create mode 100644 src/prometheus/config/testdata/remote_read_url_missing.bad.yml create mode 100644 src/prometheus/config/testdata/remote_write_url_missing.bad.yml create mode 100644 src/prometheus/config/testdata/rules.bad.yml create mode 100644 src/prometheus/config/testdata/rules_abs_path.good.yml create mode 100644 src/prometheus/config/testdata/rules_abs_path_windows.good.yml create mode 100644 src/prometheus/config/testdata/scrape_interval.bad.yml create mode 100644 src/prometheus/config/testdata/section_key_dup.bad.yml create mode 100644 src/prometheus/config/testdata/static_config.bad.json create mode 100644 src/prometheus/config/testdata/static_config.bad.yml create mode 100644 src/prometheus/config/testdata/target_label_hashmod_missing.bad.yml create mode 100644 src/prometheus/config/testdata/target_label_missing.bad.yml create mode 100644 src/prometheus/config/testdata/unknown_attr.bad.yml 
create mode 100644 src/prometheus/config/testdata/unknown_global_attr.bad.yml create mode 100644 src/prometheus/config/testdata/url_in_targetgroup.bad.yml create mode 100644 src/prometheus/console_libraries/menu.lib create mode 100644 src/prometheus/console_libraries/prom.lib create mode 100644 src/prometheus/consoles/index.html.example create mode 100644 src/prometheus/consoles/node-cpu.html create mode 100644 src/prometheus/consoles/node-disk.html create mode 100644 src/prometheus/consoles/node-overview.html create mode 100644 src/prometheus/consoles/node.html create mode 100644 src/prometheus/consoles/prometheus-overview.html create mode 100644 src/prometheus/consoles/prometheus.html create mode 100644 src/prometheus/discovery/README.md create mode 100644 src/prometheus/discovery/azure/azure.go create mode 100644 src/prometheus/discovery/config/config.go create mode 100644 src/prometheus/discovery/consul/consul.go create mode 100644 src/prometheus/discovery/consul/consul_test.go create mode 100644 src/prometheus/discovery/dns/dns.go create mode 100644 src/prometheus/discovery/ec2/ec2.go create mode 100644 src/prometheus/discovery/file/file.go create mode 100644 src/prometheus/discovery/file/file_test.go create mode 100644 src/prometheus/discovery/file/fixtures/invalid_nil.json create mode 100644 src/prometheus/discovery/file/fixtures/invalid_nil.yml create mode 100644 src/prometheus/discovery/file/fixtures/valid.json create mode 100644 src/prometheus/discovery/file/fixtures/valid.yml create mode 100644 src/prometheus/discovery/gce/gce.go create mode 100644 src/prometheus/discovery/kubernetes/endpoints.go create mode 100644 src/prometheus/discovery/kubernetes/endpoints_test.go create mode 100644 src/prometheus/discovery/kubernetes/ingress.go create mode 100644 src/prometheus/discovery/kubernetes/ingress_test.go create mode 100644 src/prometheus/discovery/kubernetes/kubernetes.go create mode 100644 src/prometheus/discovery/kubernetes/kubernetes_test.go create mode 
100644 src/prometheus/discovery/kubernetes/node.go create mode 100644 src/prometheus/discovery/kubernetes/node_test.go create mode 100644 src/prometheus/discovery/kubernetes/pod.go create mode 100644 src/prometheus/discovery/kubernetes/pod_test.go create mode 100644 src/prometheus/discovery/kubernetes/service.go create mode 100644 src/prometheus/discovery/kubernetes/service_test.go create mode 100644 src/prometheus/discovery/manager.go create mode 100644 src/prometheus/discovery/manager_test.go create mode 100644 src/prometheus/discovery/marathon/marathon.go create mode 100644 src/prometheus/discovery/marathon/marathon_test.go create mode 100644 src/prometheus/discovery/openstack/hypervisor.go create mode 100644 src/prometheus/discovery/openstack/hypervisor_test.go create mode 100644 src/prometheus/discovery/openstack/instance.go create mode 100644 src/prometheus/discovery/openstack/instance_test.go create mode 100644 src/prometheus/discovery/openstack/mock.go create mode 100644 src/prometheus/discovery/openstack/openstack.go create mode 100644 src/prometheus/discovery/targetgroup/targetgroup.go create mode 100644 src/prometheus/discovery/targetgroup/targetgroup_test.go create mode 100644 src/prometheus/discovery/triton/triton.go create mode 100644 src/prometheus/discovery/triton/triton_test.go create mode 100644 src/prometheus/discovery/zookeeper/zookeeper.go create mode 100644 src/prometheus/docs/configuration/alerting_rules.md create mode 100644 src/prometheus/docs/configuration/configuration.md create mode 100644 src/prometheus/docs/configuration/index.md create mode 100644 src/prometheus/docs/configuration/recording_rules.md create mode 100644 src/prometheus/docs/configuration/template_examples.md create mode 100644 src/prometheus/docs/configuration/template_reference.md create mode 100644 src/prometheus/docs/federation.md create mode 100644 src/prometheus/docs/getting_started.md create mode 100644 src/prometheus/docs/images/remote_integrations.png create mode 
100644 src/prometheus/docs/index.md create mode 100644 src/prometheus/docs/installation.md create mode 100644 src/prometheus/docs/migration.md create mode 100644 src/prometheus/docs/querying/api.md create mode 100644 src/prometheus/docs/querying/basics.md create mode 100644 src/prometheus/docs/querying/examples.md create mode 100644 src/prometheus/docs/querying/functions.md create mode 100644 src/prometheus/docs/querying/index.md create mode 100644 src/prometheus/docs/querying/operators.md create mode 100644 src/prometheus/docs/stability.md create mode 100644 src/prometheus/docs/storage.md create mode 100644 src/prometheus/documentation/dev/api/swagger.json create mode 100644 src/prometheus/documentation/examples/custom-sd/README.md create mode 100644 src/prometheus/documentation/examples/custom-sd/adapter-usage/main.go create mode 100644 src/prometheus/documentation/examples/custom-sd/adapter/adapter.go create mode 100644 src/prometheus/documentation/examples/kubernetes-rabbitmq/README.md create mode 100644 src/prometheus/documentation/examples/kubernetes-rabbitmq/rc.yml create mode 100644 src/prometheus/documentation/examples/kubernetes-rabbitmq/svc.yml create mode 100644 src/prometheus/documentation/examples/prometheus-kubernetes.yml create mode 100644 src/prometheus/documentation/examples/prometheus.yml create mode 100644 src/prometheus/documentation/examples/rbac-setup.yml create mode 100644 src/prometheus/documentation/examples/remote_storage/example_write_adapter/README.md create mode 100644 src/prometheus/documentation/examples/remote_storage/example_write_adapter/server.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/README.md create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go create mode 100644 
src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/main.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go create mode 100644 src/prometheus/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue_test.go create mode 100644 src/prometheus/documentation/images/architecture.svg create mode 100644 src/prometheus/documentation/images/architecture.xml create mode 100644 src/prometheus/documentation/images/diagram_note.md create mode 100644 src/prometheus/notifier/notifier.go create mode 100644 src/prometheus/notifier/notifier_test.go create mode 100644 src/prometheus/pkg/labels/labels.go create mode 100644 src/prometheus/pkg/labels/matcher.go create mode 100644 src/prometheus/pkg/labels/matcher_test.go create mode 100644 src/prometheus/pkg/pool/pool.go create mode 100644 src/prometheus/pkg/relabel/relabel.go create mode 100644 src/prometheus/pkg/relabel/relabel_test.go create mode 100644 src/prometheus/pkg/rulefmt/rulefmt.go create mode 100644 src/prometheus/pkg/rulefmt/rulefmt_test.go create mode 100644 src/prometheus/pkg/rulefmt/testdata/bad_annotation.bad.yaml create mode 100644 src/prometheus/pkg/rulefmt/testdata/bad_expr.bad.yaml create mode 100644 src/prometheus/pkg/rulefmt/testdata/bad_lname.bad.yaml create mode 100644 src/prometheus/pkg/rulefmt/testdata/duplicate_grp.bad.yaml 
create mode 100644 src/prometheus/pkg/rulefmt/testdata/invalid_record_name.bad.yaml create mode 100644 src/prometheus/pkg/rulefmt/testdata/no_rec_alert.bad.yaml create mode 100644 src/prometheus/pkg/rulefmt/testdata/noexpr.bad.yaml create mode 100644 src/prometheus/pkg/rulefmt/testdata/record_and_alert.bad.yaml create mode 100644 src/prometheus/pkg/rulefmt/testdata/test.yaml create mode 100644 src/prometheus/pkg/textparse/lex.l create mode 100644 src/prometheus/pkg/textparse/lex.l.go create mode 100644 src/prometheus/pkg/textparse/parse.go create mode 100644 src/prometheus/pkg/textparse/parse_test.go create mode 100644 src/prometheus/pkg/textparse/testdata.nometa.txt create mode 100644 src/prometheus/pkg/textparse/testdata.txt create mode 100644 src/prometheus/pkg/timestamp/timestamp.go create mode 100644 src/prometheus/pkg/value/value.go create mode 100644 src/prometheus/prompb/remote.pb.go create mode 100644 src/prometheus/prompb/remote.proto create mode 100644 src/prometheus/prompb/rpc.pb.go create mode 100644 src/prometheus/prompb/rpc.pb.gw.go create mode 100644 src/prometheus/prompb/rpc.proto create mode 100644 src/prometheus/prompb/types.pb.go create mode 100644 src/prometheus/prompb/types.proto create mode 100644 src/prometheus/promql/ast.go create mode 100644 src/prometheus/promql/bench_test.go create mode 100644 src/prometheus/promql/engine.go create mode 100644 src/prometheus/promql/engine_test.go create mode 100644 src/prometheus/promql/functions.go create mode 100644 src/prometheus/promql/functions_test.go create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_1 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_10 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_11 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_12 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_13 create mode 100644 
src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_14 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_15 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_16 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_17 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_18 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_19 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_2 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_20 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_21 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_22 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_23 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_24 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_25 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_26 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_27 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_28 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_29 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_3 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_30 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_31 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_32 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_33 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_34 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_4 create mode 100644 
src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_5 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_6 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_7 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_8 create mode 100644 src/prometheus/promql/fuzz-data/ParseExpr/corpus/from_tests_9 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/982cbe5ad899f03c630b1a21876a206707ea3dc9 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_0 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_1 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_2 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_3 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_4 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_5 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_6 create mode 100644 src/prometheus/promql/fuzz-data/ParseMetric/corpus/exposition_formats_7 create mode 100644 src/prometheus/promql/fuzz.go create mode 100644 src/prometheus/promql/lex.go create mode 100644 src/prometheus/promql/lex_test.go create mode 100644 src/prometheus/promql/parse.go create mode 100644 src/prometheus/promql/parse_test.go create mode 100644 src/prometheus/promql/printer.go create mode 100644 src/prometheus/promql/printer_test.go create mode 100644 src/prometheus/promql/promql_test.go create mode 100644 src/prometheus/promql/quantile.go create mode 100644 src/prometheus/promql/test.go create mode 100644 src/prometheus/promql/testdata/aggregators.test create mode 100644 src/prometheus/promql/testdata/functions.test create mode 100644 src/prometheus/promql/testdata/histograms.test create mode 100644 
src/prometheus/promql/testdata/legacy.test create mode 100644 src/prometheus/promql/testdata/literals.test create mode 100644 src/prometheus/promql/testdata/operators.test create mode 100644 src/prometheus/promql/testdata/selectors.test create mode 100644 src/prometheus/promql/testdata/staleness.test create mode 100644 src/prometheus/promql/value.go create mode 100644 src/prometheus/relabel/relabel.go create mode 100644 src/prometheus/relabel/relabel_test.go create mode 100644 src/prometheus/rules/alerting.go create mode 100644 src/prometheus/rules/alerting_test.go create mode 100644 src/prometheus/rules/manager.go create mode 100644 src/prometheus/rules/manager_test.go create mode 100644 src/prometheus/rules/recording.go create mode 100644 src/prometheus/rules/recording_test.go create mode 100644 src/prometheus/scrape/helpers_test.go create mode 100644 src/prometheus/scrape/manager.go create mode 100644 src/prometheus/scrape/manager_test.go create mode 100644 src/prometheus/scrape/scrape.go create mode 100644 src/prometheus/scrape/scrape_test.go create mode 100644 src/prometheus/scrape/target.go create mode 100644 src/prometheus/scrape/target_test.go create mode 100644 src/prometheus/scrape/testdata/bearertoken.txt create mode 100644 src/prometheus/scrape/testdata/ca.cer create mode 100644 src/prometheus/scrape/testdata/ca.key create mode 100644 src/prometheus/scrape/testdata/client.cer create mode 100644 src/prometheus/scrape/testdata/client.key create mode 100644 src/prometheus/scrape/testdata/server.cer create mode 100644 src/prometheus/scrape/testdata/server.key create mode 100644 src/prometheus/scrape/testdata/servername.cer create mode 100644 src/prometheus/scrape/testdata/servername.key create mode 100755 src/prometheus/scripts/genproto.sh create mode 100644 src/prometheus/storage/buffer.go create mode 100644 src/prometheus/storage/buffer_test.go create mode 100644 src/prometheus/storage/fanout.go create mode 100644 src/prometheus/storage/fanout_test.go 
create mode 100644 src/prometheus/storage/interface.go create mode 100644 src/prometheus/storage/noop.go create mode 100644 src/prometheus/storage/remote/client.go create mode 100644 src/prometheus/storage/remote/client_test.go create mode 100644 src/prometheus/storage/remote/codec.go create mode 100644 src/prometheus/storage/remote/codec_test.go create mode 100644 src/prometheus/storage/remote/ewma.go create mode 100644 src/prometheus/storage/remote/queue_manager.go create mode 100644 src/prometheus/storage/remote/queue_manager_test.go create mode 100644 src/prometheus/storage/remote/read.go create mode 100644 src/prometheus/storage/remote/read_test.go create mode 100644 src/prometheus/storage/remote/storage.go create mode 100644 src/prometheus/storage/remote/write.go create mode 100644 src/prometheus/storage/tsdb/tsdb.go create mode 100644 src/prometheus/template/template.go create mode 100644 src/prometheus/template/template_test.go create mode 100644 src/prometheus/util/httputil/compression.go create mode 100644 src/prometheus/util/promlint/promlint.go create mode 100644 src/prometheus/util/promlint/promlint_test.go create mode 100644 src/prometheus/util/stats/query_stats.go create mode 100644 src/prometheus/util/stats/stats_test.go create mode 100644 src/prometheus/util/stats/timer.go create mode 100644 src/prometheus/util/strutil/quote.go create mode 100644 src/prometheus/util/strutil/quote_test.go create mode 100644 src/prometheus/util/strutil/strconv.go create mode 100644 src/prometheus/util/strutil/strconv_test.go create mode 100644 src/prometheus/util/testutil/directory.go create mode 100644 src/prometheus/util/testutil/error.go create mode 100644 src/prometheus/util/testutil/roundtrip.go create mode 100644 src/prometheus/util/testutil/storage.go create mode 100644 src/prometheus/util/testutil/testing.go create mode 100644 src/prometheus/util/treecache/treecache.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/LICENSE create 
mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go create mode 100644 
src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go create mode 100644 src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/LICENSE create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/autorest.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/async.go create mode 100644 
src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/config.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/token.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/client.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/date.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/time.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/utility.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/error.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/preparer.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/responder.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/to/convert.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go create mode 100644 src/prometheus/vendor/github.com/Azure/go-autorest/autorest/version.go create mode 100644 src/prometheus/vendor/github.com/PuerkitoBio/purell/LICENSE create mode 100644 src/prometheus/vendor/github.com/PuerkitoBio/purell/purell.go create mode 100644 
src/prometheus/vendor/github.com/PuerkitoBio/urlesc/LICENSE create mode 100644 src/prometheus/vendor/github.com/PuerkitoBio/urlesc/urlesc.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/LICENSE create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/README.md create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/doc.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/exec.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/funcs.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/helper.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/parse/lex.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/parse/node.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/parse/parse.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/template/template.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/units/COPYING create mode 100644 src/prometheus/vendor/github.com/alecthomas/units/README.md create mode 100644 src/prometheus/vendor/github.com/alecthomas/units/bytes.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/units/doc.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/units/si.go create mode 100644 src/prometheus/vendor/github.com/alecthomas/units/util.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 
src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 
src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go create mode 
100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go create mode 100644 
src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 src/prometheus/vendor/github.com/beorn7/perks/LICENSE create mode 100644 src/prometheus/vendor/github.com/beorn7/perks/quantile/exampledata.txt create mode 100644 src/prometheus/vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 src/prometheus/vendor/github.com/cespare/xxhash/LICENSE.txt create mode 100644 src/prometheus/vendor/github.com/cespare/xxhash/README.md create mode 100644 src/prometheus/vendor/github.com/cespare/xxhash/xxhash.go create mode 100644 src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.go create mode 100644 src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.s create mode 100644 src/prometheus/vendor/github.com/cespare/xxhash/xxhash_other.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/CONTRIBUTORS create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/LICENSE create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/README.md create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/buffer.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/cmux.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/doc.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/matchers.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cmux/patricia.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cockroach/LICENSE create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/httputil/http.go create mode 100644 
src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/clone.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/jsonpb_marshal.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshal.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshaler.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_deadlock.go create mode 100644 src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_sync.go create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 src/prometheus/vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/LICENSE create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/README.md create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/claims.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/doc.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/errors.go 
create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/hmac.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/map_claims.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/none.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/parser.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/signing_method.go create mode 100644 src/prometheus/vendor/github.com/dgrijalva/jwt-go/token.go create mode 100644 src/prometheus/vendor/github.com/docker/distribution/LICENSE create mode 100644 src/prometheus/vendor/github.com/docker/distribution/digest/digest.go create mode 100644 src/prometheus/vendor/github.com/docker/distribution/digest/digester.go create mode 100644 src/prometheus/vendor/github.com/docker/distribution/digest/doc.go create mode 100644 src/prometheus/vendor/github.com/docker/distribution/digest/set.go create mode 100644 src/prometheus/vendor/github.com/docker/distribution/digest/verifiers.go create mode 100644 src/prometheus/vendor/github.com/docker/distribution/reference/reference.go create mode 100644 src/prometheus/vendor/github.com/docker/distribution/reference/regexp.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/CHANGES.md create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/LICENSE create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/Srcfile create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/compress.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/compressor_cache.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/compressor_pools.go create mode 100644 
src/prometheus/vendor/github.com/emicklei/go-restful/compressors.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/constants.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/container.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/cors_filter.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/curly.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/curly_route.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/doc.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/entity_accessors.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/filter.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/jsr311.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/log/log.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/logger.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/mime.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/options_filter.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/parameter.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/path_expression.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/request.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/response.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/route.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/route_builder.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/router.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/service_error.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md create mode 100644 
src/prometheus/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/config.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_builder.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_list.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/web_service.go create mode 100644 src/prometheus/vendor/github.com/emicklei/go-restful/web_service_container.go create mode 100644 src/prometheus/vendor/github.com/ghodss/yaml/LICENSE create mode 100644 src/prometheus/vendor/github.com/ghodss/yaml/fields.go create mode 100644 src/prometheus/vendor/github.com/ghodss/yaml/yaml.go create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/LICENSE create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/Makefile create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/README.md create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/README_ZH.md create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/error.go create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/ini.go create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/key.go create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/parser.go create mode 100644 
src/prometheus/vendor/github.com/go-ini/ini/section.go create mode 100644 src/prometheus/vendor/github.com/go-ini/ini/struct.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/LICENSE create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/README.md create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/doc.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/json_logger.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/level/doc.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/level/level.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/log.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/logfmt_logger.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/nop_logger.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/stdlib.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/sync.go create mode 100644 src/prometheus/vendor/github.com/go-kit/kit/log/value.go create mode 100644 src/prometheus/vendor/github.com/go-logfmt/logfmt/LICENSE create mode 100644 src/prometheus/vendor/github.com/go-logfmt/logfmt/README.md create mode 100644 src/prometheus/vendor/github.com/go-logfmt/logfmt/decode.go create mode 100644 src/prometheus/vendor/github.com/go-logfmt/logfmt/doc.go create mode 100644 src/prometheus/vendor/github.com/go-logfmt/logfmt/encode.go create mode 100644 src/prometheus/vendor/github.com/go-logfmt/logfmt/fuzz.go create mode 100644 src/prometheus/vendor/github.com/go-logfmt/logfmt/jsonstring.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md create mode 100644 src/prometheus/vendor/github.com/go-openapi/jsonpointer/LICENSE create mode 100644 src/prometheus/vendor/github.com/go-openapi/jsonpointer/pointer.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md create mode 100644 
src/prometheus/vendor/github.com/go-openapi/jsonreference/LICENSE create mode 100644 src/prometheus/vendor/github.com/go-openapi/jsonreference/reference.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/LICENSE create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/bindata.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/contact_info.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/expander.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/external_docs.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/header.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/info.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/items.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/license.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/operation.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/parameter.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/path_item.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/paths.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/ref.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/response.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/responses.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/schema.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/security_scheme.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/spec.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/swagger.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/tag.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/spec/xml_object.go create 
mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/LICENSE create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/convert.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/convert_types.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/json.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/loading.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/net.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/path.go create mode 100644 src/prometheus/vendor/github.com/go-openapi/swag/util.go create mode 100644 src/prometheus/vendor/github.com/go-stack/stack/LICENSE.md create mode 100644 src/prometheus/vendor/github.com/go-stack/stack/README.md create mode 100644 src/prometheus/vendor/github.com/go-stack/stack/stack.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/LICENSE create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/decode_gogo.go create mode 100644 
src/prometheus/vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile 
create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/Makefile create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/any.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/any.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/doc.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/duration.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/duration.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/duration_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/empty.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/field_mask.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/struct.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/timestamp.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/timestamp.pb.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go create mode 100644 src/prometheus/vendor/github.com/gogo/protobuf/types/wrappers.pb.go create mode 100644 src/prometheus/vendor/github.com/golang/glog/LICENSE create mode 100644 src/prometheus/vendor/github.com/golang/glog/README create mode 100644 src/prometheus/vendor/github.com/golang/glog/glog.go create mode 100644 
src/prometheus/vendor/github.com/golang/glog/glog_file.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/LICENSE create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/Makefile create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/text.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 src/prometheus/vendor/github.com/golang/protobuf/ptypes/any/any.proto create mode 100644 src/prometheus/vendor/github.com/golang/snappy/AUTHORS create mode 100644 src/prometheus/vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 src/prometheus/vendor/github.com/golang/snappy/LICENSE create mode 100644 
src/prometheus/vendor/github.com/golang/snappy/README create mode 100644 src/prometheus/vendor/github.com/golang/snappy/decode.go create mode 100644 src/prometheus/vendor/github.com/golang/snappy/decode_amd64.go create mode 100644 src/prometheus/vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 src/prometheus/vendor/github.com/golang/snappy/decode_other.go create mode 100644 src/prometheus/vendor/github.com/golang/snappy/encode.go create mode 100644 src/prometheus/vendor/github.com/golang/snappy/encode_amd64.go create mode 100644 src/prometheus/vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 src/prometheus/vendor/github.com/golang/snappy/encode_other.go create mode 100644 src/prometheus/vendor/github.com/golang/snappy/snappy.go create mode 100644 src/prometheus/vendor/github.com/google/gofuzz/CONTRIBUTING.md create mode 100644 src/prometheus/vendor/github.com/google/gofuzz/LICENSE create mode 100644 src/prometheus/vendor/github.com/google/gofuzz/doc.go create mode 100644 src/prometheus/vendor/github.com/google/gofuzz/fuzz.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/FAQ.md create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/LICENSE create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/MIGRATING.md create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/README.md create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/auth_options.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/endpoint_search.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/errors.go create mode 100644 
src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/client.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go create mode 100644 
src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/errors.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go create mode 100644 
src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/pagination/http.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/pagination/linked.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/pagination/marker.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/pagination/pager.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/pagination/single.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/params.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/provider_client.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/results.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/service_client.go create mode 100644 src/prometheus/vendor/github.com/gophercloud/gophercloud/util.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go create mode 100644 
src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go create mode 100644 src/prometheus/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/LICENSE create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/README.md create mode 
100644 src/prometheus/vendor/github.com/hashicorp/consul/api/acl.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/agent.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/api.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/catalog.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/coordinate.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/event.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/health.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/kv.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/lock.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/operator.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/prepared_query.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/raw.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/semaphore.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/session.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/snapshot.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/consul/api/status.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 src/prometheus/vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 src/prometheus/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/serf/LICENSE create mode 100644 src/prometheus/vendor/github.com/hashicorp/serf/coordinate/client.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/serf/coordinate/config.go create mode 100644 
src/prometheus/vendor/github.com/hashicorp/serf/coordinate/coordinate.go create mode 100644 src/prometheus/vendor/github.com/hashicorp/serf/coordinate/phantom.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/LICENSE create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/client/v2/client.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/client/v2/udp.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/models/consistency.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/models/inline_fnv.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/models/points.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/models/rows.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/models/statistic.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/models/time.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go create mode 100644 src/prometheus/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go create mode 100644 src/prometheus/vendor/github.com/influxdb/influxdb/LICENSE create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/Makefile create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 
src/prometheus/vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 src/prometheus/vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/Gopkg.lock create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/Gopkg.toml create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/LICENSE create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/README.md create mode 100755 src/prometheus/vendor/github.com/json-iterator/go/build.sh create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_adapter.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_array.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_bool.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_float.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_int32.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_int64.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_invalid.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_nil.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_number.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_object.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_string.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_any_uint32.go create mode 100644 
src/prometheus/vendor/github.com/json-iterator/go/feature_any_uint64.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_config.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_config_with_sync_map.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_config_without_sync_map.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_array.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_float.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_int.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_object.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_skip.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_iter_string.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_json_number.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_pool.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_array.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_extension.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_map.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_native.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_object.go create mode 100644 
src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_optional.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_slice.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_stream.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_stream_float.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_stream_int.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/feature_stream_string.go create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md create mode 100644 src/prometheus/vendor/github.com/json-iterator/go/jsoniter.go create mode 100755 src/prometheus/vendor/github.com/json-iterator/go/test.sh create mode 100644 src/prometheus/vendor/github.com/juju/ratelimit/LICENSE create mode 100644 src/prometheus/vendor/github.com/juju/ratelimit/ratelimit.go create mode 100644 src/prometheus/vendor/github.com/juju/ratelimit/reader.go create mode 100644 src/prometheus/vendor/github.com/julienschmidt/httprouter/LICENSE create mode 100644 src/prometheus/vendor/github.com/julienschmidt/httprouter/README.md create mode 100644 src/prometheus/vendor/github.com/julienschmidt/httprouter/path.go create mode 100644 src/prometheus/vendor/github.com/julienschmidt/httprouter/router.go create mode 100644 src/prometheus/vendor/github.com/julienschmidt/httprouter/tree.go create mode 100644 src/prometheus/vendor/github.com/kr/logfmt/Readme create mode 100644 src/prometheus/vendor/github.com/kr/logfmt/decode.go create mode 100644 src/prometheus/vendor/github.com/kr/logfmt/scanner.go create mode 100644 src/prometheus/vendor/github.com/kr/logfmt/unquote.go create mode 100644 src/prometheus/vendor/github.com/mailru/easyjson/LICENSE create mode 100644 
src/prometheus/vendor/github.com/mailru/easyjson/buffer/pool.go create mode 100644 src/prometheus/vendor/github.com/mailru/easyjson/jlexer/error.go create mode 100644 src/prometheus/vendor/github.com/mailru/easyjson/jlexer/lexer.go create mode 100644 src/prometheus/vendor/github.com/mailru/easyjson/jwriter/writer.go create mode 100644 src/prometheus/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 src/prometheus/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 src/prometheus/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 src/prometheus/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 src/prometheus/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/AUTHORS create mode 100644 src/prometheus/vendor/github.com/miekg/dns/CONTRIBUTORS create mode 100644 src/prometheus/vendor/github.com/miekg/dns/COPYRIGHT create mode 100644 src/prometheus/vendor/github.com/miekg/dns/Gopkg.lock create mode 100644 src/prometheus/vendor/github.com/miekg/dns/Gopkg.toml create mode 100644 src/prometheus/vendor/github.com/miekg/dns/LICENSE create mode 100644 src/prometheus/vendor/github.com/miekg/dns/Makefile.fuzz create mode 100644 src/prometheus/vendor/github.com/miekg/dns/Makefile.release create mode 100644 src/prometheus/vendor/github.com/miekg/dns/README.md create mode 100644 src/prometheus/vendor/github.com/miekg/dns/client.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/clientconfig.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/compress_generate.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/dane.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/defaults.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/dns.go create mode 100644 
src/prometheus/vendor/github.com/miekg/dns/dnssec.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/dnssec_keyscan.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/doc.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/edns.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/format.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/fuzz.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/generate.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/labels.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/msg.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/msg_generate.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/msg_helpers.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/nsecx.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/privaterr.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/rawmsg.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/reverse.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/sanitize.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/scan.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/scan_rr.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/scanner.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/server.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/sig0.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/singleinflight.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/smimea.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/tlsa.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/tsig.go create mode 100644 
src/prometheus/vendor/github.com/miekg/dns/types.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/types_generate.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/udp.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/udp_windows.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/update.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/version.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/xfr.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/zcompress.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/zmsg.go create mode 100644 src/prometheus/vendor/github.com/miekg/dns/ztypes.go create mode 100644 src/prometheus/vendor/github.com/mwitkow/go-conntrack/LICENSE create mode 100644 src/prometheus/vendor/github.com/mwitkow/go-conntrack/README.md create mode 100644 src/prometheus/vendor/github.com/mwitkow/go-conntrack/dialer_reporter.go create mode 100644 src/prometheus/vendor/github.com/mwitkow/go-conntrack/dialer_wrapper.go create mode 100644 src/prometheus/vendor/github.com/mwitkow/go-conntrack/listener_reporter.go create mode 100644 src/prometheus/vendor/github.com/mwitkow/go-conntrack/listener_wrapper.go create mode 100644 src/prometheus/vendor/github.com/oklog/oklog/LICENSE create mode 100644 src/prometheus/vendor/github.com/oklog/oklog/pkg/group/group.go create mode 100644 src/prometheus/vendor/github.com/oklog/ulid/AUTHORS.md create mode 100644 src/prometheus/vendor/github.com/oklog/ulid/CHANGELOG.md create mode 100644 src/prometheus/vendor/github.com/oklog/ulid/CONTRIBUTING.md create mode 100644 src/prometheus/vendor/github.com/oklog/ulid/LICENSE create mode 100644 src/prometheus/vendor/github.com/oklog/ulid/README.md create mode 100644 src/prometheus/vendor/github.com/oklog/ulid/ulid.go create mode 100644 src/prometheus/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE create mode 100644 
src/prometheus/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go create mode 100644 src/prometheus/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go create mode 100644 src/prometheus/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/LICENSE create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/Makefile create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/README.md create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/ext/tags.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/globaltracer.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/gocontext.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/log/field.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/log/util.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/noop.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/propagation.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/span.go create mode 100644 src/prometheus/vendor/github.com/opentracing/opentracing-go/tracer.go create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/LICENSE create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/README.md create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid.go create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.3.c create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.3.go create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.4.go create mode 100644 
src/prometheus/vendor/github.com/petermattis/goid/goid_go1.4.s create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.5.go create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.5plus.s create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.5plus_arm.s create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.6plus.go create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_go1.9plus.go create mode 100644 src/prometheus/vendor/github.com/petermattis/goid/goid_slow.go create mode 100644 src/prometheus/vendor/github.com/pkg/errors/LICENSE create mode 100644 src/prometheus/vendor/github.com/pkg/errors/README.md create mode 100644 src/prometheus/vendor/github.com/pkg/errors/appveyor.yml create mode 100644 src/prometheus/vendor/github.com/pkg/errors/errors.go create mode 100644 src/prometheus/vendor/github.com/pkg/errors/stack.go create mode 100644 src/prometheus/vendor/github.com/pmezard/go-difflib/LICENSE create mode 100644 src/prometheus/vendor/github.com/pmezard/go-difflib/difflib/difflib.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/api/client.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 
src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/summary.go create 
mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 src/prometheus/vendor/github.com/prometheus/client_model/LICENSE create mode 100644 src/prometheus/vendor/github.com/prometheus/client_model/NOTICE create mode 100644 src/prometheus/vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/LICENSE create mode 100644 src/prometheus/vendor/github.com/prometheus/common/NOTICE create mode 100644 src/prometheus/vendor/github.com/prometheus/common/config/config.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/config/http_config.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 src/prometheus/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/alert.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/fnv.go 
create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/labels.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/labelset.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/metric.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/model.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/signature.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/silence.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/time.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/model/value.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/promlog/flag/flag.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/promlog/log.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/route/route.go create mode 100644 src/prometheus/vendor/github.com/prometheus/common/version/info.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/AUTHORS.md create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/LICENSE create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/Makefile create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/NOTICE create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/README.md create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/doc.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/fs.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/proc.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/proc_io.go create mode 
100644 src/prometheus/vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 src/prometheus/vendor/github.com/prometheus/procfs/stat.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/LICENSE create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/README.md create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/block.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/chunkenc/chunk.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/chunkenc/xor.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/chunks/chunks.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/compact.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/db.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/encoding_helpers.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/dir_unix.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/dir_windows.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/flock.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/flock_plan9.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/flock_solaris.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/flock_unix.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/flock_windows.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/mmap.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/mmap_386.go create mode 100644 
src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/mmap_amd64.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/mmap_unix.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/mmap_windows.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/preallocate.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/preallocate_darwin.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/preallocate_linux.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/preallocate_other.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/sync.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/sync_darwin.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/fileutil/sync_linux.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/head.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/index/encoding_helpers.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/index/index.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/index/postings.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/labels/labels.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/labels/selector.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/querier.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/repair.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/tombstones.go create mode 100644 src/prometheus/vendor/github.com/prometheus/tsdb/wal.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/LICENSE create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/conn.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/constants.go create mode 
100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/flw.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/lock.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/server_help.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/server_java.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/structs.go create mode 100644 src/prometheus/vendor/github.com/samuel/go-zookeeper/zk/util.go create mode 100644 src/prometheus/vendor/github.com/sasha-s/go-deadlock/LICENSE create mode 100644 src/prometheus/vendor/github.com/sasha-s/go-deadlock/Readme.md create mode 100644 src/prometheus/vendor/github.com/sasha-s/go-deadlock/deadlock.go create mode 100644 src/prometheus/vendor/github.com/sasha-s/go-deadlock/stacktraces.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/LICENSE create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/README.md create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/bool.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/count.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/duration.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/flag.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/float32.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/float64.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/golangflag.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/int.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/int32.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/int64.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/int8.go create mode 100644 
src/prometheus/vendor/github.com/spf13/pflag/int_slice.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/ip.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/ipmask.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/ipnet.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/string.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/string_array.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/string_slice.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/uint.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/uint16.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/uint32.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/uint64.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/uint8.go create mode 100644 src/prometheus/vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/LICENSE create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/assert/assertion_forward.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/assert/assertions.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/assert/doc.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/assert/errors.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/assert/forward_assertions.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/assert/http_assertions.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/require/doc.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/require/forward_requirements.go create mode 100644 
src/prometheus/vendor/github.com/stretchr/testify/require/require.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/require/require.go.tmpl create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/require/require_forward.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/require/requirements.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/suite/doc.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/suite/interfaces.go create mode 100644 src/prometheus/vendor/github.com/stretchr/testify/suite/suite.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/LICENSE create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/0doc.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/binc.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/cbor.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/decode.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/encode.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/fast-path.generated.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/fast-path.not.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/gen-helper.generated.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/gen.generated.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/gen.go create mode 100644 
src/prometheus/vendor/github.com/ugorji/go/codec/helper.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/helper_internal.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/helper_unsafe.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/json.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/msgpack.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/noop.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/prebuild.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/rpc.go create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/simple.go create mode 100755 src/prometheus/vendor/github.com/ugorji/go/codec/test.py create mode 100644 src/prometheus/vendor/github.com/ugorji/go/codec/time.go create mode 100644 src/prometheus/vendor/golang.org/x/crypto/LICENSE create mode 100644 src/prometheus/vendor/golang.org/x/crypto/PATENTS create mode 100644 src/prometheus/vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 src/prometheus/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go create mode 100644 src/prometheus/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go create mode 100644 src/prometheus/vendor/golang.org/x/net/LICENSE create mode 100644 src/prometheus/vendor/golang.org/x/net/PATENTS create mode 100644 src/prometheus/vendor/golang.org/x/net/bpf/asm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/bpf/constants.go create mode 100644 src/prometheus/vendor/golang.org/x/net/bpf/doc.go create mode 100644 src/prometheus/vendor/golang.org/x/net/bpf/instructions.go create mode 100644 src/prometheus/vendor/golang.org/x/net/bpf/setter.go create mode 100644 src/prometheus/vendor/golang.org/x/net/bpf/vm.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/bpf/vm_instructions.go create mode 100644 src/prometheus/vendor/golang.org/x/net/context/context.go create mode 100644 src/prometheus/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 src/prometheus/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 src/prometheus/vendor/golang.org/x/net/context/go17.go create mode 100644 src/prometheus/vendor/golang.org/x/net/context/go19.go create mode 100644 src/prometheus/vendor/golang.org/x/net/context/pre_go17.go create mode 100644 src/prometheus/vendor/golang.org/x/net/context/pre_go19.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/Dockerfile create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/Makefile create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/README create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/ciphers.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/configure_transport.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/databuffer.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/errors.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/flow.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/frame.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/go16.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/go17.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/go17_not18.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/go18.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/go19.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/gotrack.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/headermap.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/http2.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/not_go16.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/not_go17.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/not_go18.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/not_go19.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/pipe.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/server.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/transport.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/write.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/writesched.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 src/prometheus/vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 src/prometheus/vendor/golang.org/x/net/idna/idna.go create mode 100644 src/prometheus/vendor/golang.org/x/net/idna/punycode.go create mode 100644 src/prometheus/vendor/golang.org/x/net/idna/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/net/idna/trie.go create mode 100644 src/prometheus/vendor/golang.org/x/net/idna/trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/iana/const.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/cmsghdr.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/error_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/error_windows.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/iovec_32bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/iovec_64bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/iovec_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/msghdr_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/rawconn.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/rawconn_msg.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go create mode 
100644 src/prometheus/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/rawconn_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/reflect.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/socket.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_darwin.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_386.s create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_netbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_posix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_solaris.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/sys_windows.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/batch.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/control.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/control_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/control_pktinfo.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/control_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/control_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/control_windows.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/dgramopt.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/doc.go 
create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/endpoint.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/genericopt.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/header.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/helper.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/iana.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/icmp.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/icmp_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/icmp_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/packet.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/packet_go1_8.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/packet_go1_9.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/payload.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/payload_cmsg.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/payload_nocmsg.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sockopt.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sockopt_posix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sockopt_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_asmreq.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_asmreqn.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_bpf.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/ipv4/sys_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_darwin.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_dragonfly.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_freebsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_solaris.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_ssmreq.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/sys_windows.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_darwin.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go 
create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_netbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_openbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv4/zsys_solaris.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/batch.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/control.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/control_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/control_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/control_windows.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/dgramopt.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/doc.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/endpoint.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/genericopt.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/header.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/helper.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/iana.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/icmp.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/icmp_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/icmp_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/icmp_solaris.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/icmp_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/icmp_windows.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/ipv6/payload.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/payload_cmsg.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/payload_nocmsg.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sockopt.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sockopt_posix.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sockopt_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_asmreq.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_bpf.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_darwin.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_freebsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_solaris.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_ssmreq.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_stub.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/sys_windows.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_darwin.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go create mode 100644 
src/prometheus/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_netbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_openbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/net/ipv6/zsys_solaris.go create mode 100644 src/prometheus/vendor/golang.org/x/net/lex/httplex/httplex.go create mode 100644 src/prometheus/vendor/golang.org/x/net/netutil/listen.go create mode 100644 src/prometheus/vendor/golang.org/x/net/trace/events.go create mode 100644 src/prometheus/vendor/golang.org/x/net/trace/histogram.go create mode 100644 src/prometheus/vendor/golang.org/x/net/trace/trace.go create mode 100644 src/prometheus/vendor/golang.org/x/net/trace/trace_go16.go create mode 100644 src/prometheus/vendor/golang.org/x/net/trace/trace_go17.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/AUTHORS create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 
src/prometheus/vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/LICENSE create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/README.md create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/client_appengine.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/google/appengine_hook.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/google/appenginevm_hook.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/google/default.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/google/google.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/internal/token.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/oauth2.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/token.go create mode 100644 src/prometheus/vendor/golang.org/x/oauth2/transport.go create mode 100644 src/prometheus/vendor/golang.org/x/sync/LICENSE create mode 100644 src/prometheus/vendor/golang.org/x/sync/PATENTS create mode 100644 src/prometheus/vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/LICENSE create mode 100644 src/prometheus/vendor/golang.org/x/sys/PATENTS create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/README.md create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/affinity_linux.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/asm_darwin_386.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_darwin_arm.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_freebsd_386.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_netbsd_386.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_openbsd_386.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/cap_freebsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/constants.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/dev_darwin.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/dev_dragonfly.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/dev_freebsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/dev_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/dev_netbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/dev_openbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/dirent.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/errors_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/flock.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/flock_linux_32bit.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/gccgo_c.c create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mkall.sh create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mkerrors.sh create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mksyscall.pl create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl create mode 100755 
src/prometheus/vendor/golang.org/x/sys/unix/mksysnum_darwin.pl create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl create mode 100755 src/prometheus/vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/openbsd_pledge.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/pagesize_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/race.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/race0.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/sockcmsg_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/str.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_darwin_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_gc.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/timestruct.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zptrace386_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zptracearm_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zptracemips_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 
100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 
src/prometheus/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go create mode 100644 src/prometheus/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 src/prometheus/vendor/golang.org/x/text/LICENSE create mode 100644 src/prometheus/vendor/golang.org/x/text/PATENTS create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/cases.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/context.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/fold.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/gen.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/gen_trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/info.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/map.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/text/cases/trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/internal/tag/tag.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/Makefile create mode 100644 src/prometheus/vendor/golang.org/x/text/language/common.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/coverage.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/gen_common.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/gen_index.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/go1_1.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/go1_2.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/index.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/language.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/lookup.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/maketables.go create 
mode 100644 src/prometheus/vendor/golang.org/x/text/language/match.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/parse.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/text/language/tags.go create mode 100644 src/prometheus/vendor/golang.org/x/text/runes/cond.go create mode 100644 src/prometheus/vendor/golang.org/x/text/runes/runes.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/class.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/context.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/doc.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/gen.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/gen_trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/nickname.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/options.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/profile.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/profiles.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/transformer.go create mode 100644 src/prometheus/vendor/golang.org/x/text/secure/precis/trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/transform/transform.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/gen.go create mode 100644 
src/prometheus/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 src/prometheus/vendor/golang.org/x/text/unicode/norm/triegen.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/gen.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/gen_common.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/gen_trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/kind_string.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/tables.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/transform.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/trieval.go create mode 100644 src/prometheus/vendor/golang.org/x/text/width/width.go create mode 100644 src/prometheus/vendor/golang.org/x/time/LICENSE 
create mode 100644 src/prometheus/vendor/golang.org/x/time/PATENTS create mode 100644 src/prometheus/vendor/golang.org/x/time/rate/rate.go create mode 100644 src/prometheus/vendor/golang.org/x/time/rate/rate_go16.go create mode 100644 src/prometheus/vendor/golang.org/x/time/rate/rate_go17.go create mode 100644 src/prometheus/vendor/google.golang.org/api/LICENSE create mode 100644 src/prometheus/vendor/google.golang.org/api/compute/v1/compute-api.json create mode 100644 src/prometheus/vendor/google.golang.org/api/compute/v1/compute-gen.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/backoff.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/buffer.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/doc.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/go18.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/header.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/json.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/jsonfloat.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/media.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/not_go18.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/params.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/resumable.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/retry.go create mode 100644 src/prometheus/vendor/google.golang.org/api/gensupport/send.go create mode 100644 src/prometheus/vendor/google.golang.org/api/googleapi/googleapi.go create mode 100644 src/prometheus/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE create mode 100644 src/prometheus/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go create mode 100644 
src/prometheus/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go create mode 100644 src/prometheus/vendor/google.golang.org/api/googleapi/types.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/LICENSE create mode 100644 src/prometheus/vendor/google.golang.org/appengine/README.md create mode 100644 src/prometheus/vendor/google.golang.org/appengine/appengine.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/errors.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/identity.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/api.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/base/api_base.proto create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100755 src/prometheus/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/identity.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 
src/prometheus/vendor/google.golang.org/appengine/internal/internal.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/log/log_service.proto create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/modules/modules_service.proto create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/net.go create mode 100755 src/prometheus/vendor/google.golang.org/appengine/internal/regen.sh create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto create mode 100644 src/prometheus/vendor/google.golang.org/appengine/namespace.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/timeout.go create mode 100644 src/prometheus/vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 src/prometheus/vendor/google.golang.org/cloud/LICENSE create mode 100644 src/prometheus/vendor/google.golang.org/cloud/compute/metadata/metadata.go create mode 100644 src/prometheus/vendor/google.golang.org/cloud/internal/cloud.go create mode 100644 src/prometheus/vendor/google.golang.org/genproto/LICENSE create mode 100644 src/prometheus/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 
src/prometheus/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 src/prometheus/vendor/google.golang.org/grpc/LICENSE create mode 100644 src/prometheus/vendor/google.golang.org/grpc/Makefile create mode 100644 src/prometheus/vendor/google.golang.org/grpc/PATENTS create mode 100644 src/prometheus/vendor/google.golang.org/grpc/README.md create mode 100644 src/prometheus/vendor/google.golang.org/grpc/backoff.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/balancer.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/call.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/clientconn.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/codec.go create mode 100755 src/prometheus/vendor/google.golang.org/grpc/codegen.sh create mode 100644 src/prometheus/vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/codes/codes.go create mode 100755 src/prometheus/vendor/google.golang.org/grpc/coverage.sh create mode 100644 src/prometheus/vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/doc.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/go16.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/go17.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/grpclb.go create mode 100644 
src/prometheus/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto create mode 100644 src/prometheus/vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/interceptor.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/internal/internal.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/naming/naming.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/peer/peer.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/proxy.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/rpc_util.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/server.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/stats/stats.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/status/status.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/stream.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/tap/tap.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/trace.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/transport/control.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/transport/go16.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/transport/go17.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/transport/handler_server.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/transport/http2_client.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/transport/http2_server.go create mode 100644 
src/prometheus/vendor/google.golang.org/grpc/transport/http_util.go create mode 100644 src/prometheus/vendor/google.golang.org/grpc/transport/transport.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/COPYING create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/README.md create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/app.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/args.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/global.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/model.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/values.go create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/values.json create mode 100644 src/prometheus/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/AUTHORS create mode 100644 
src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/CHANGELOG.md create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/CONTRIBUTING.md create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/LICENSE create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/README.md create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/fen.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/fsnotify.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/inotify_poller.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/kqueue.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_bsd.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/open_mode_darwin.go create mode 100644 src/prometheus/vendor/gopkg.in/fsnotify/fsnotify.v1/windows.go create mode 100644 src/prometheus/vendor/gopkg.in/inf.v0/LICENSE create mode 100644 src/prometheus/vendor/gopkg.in/inf.v0/dec.go create mode 100644 src/prometheus/vendor/gopkg.in/inf.v0/rounder.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/README.md create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/apic.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/decode.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/encode.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 
src/prometheus/vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 src/prometheus/vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/LICENSE create mode 100755 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go create mode 100755 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/default.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/help.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go create mode 100755 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go create mode 100644 
src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/math.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go create mode 100755 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go 
create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/converter.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/helper.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/fields/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/fields/fields.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/fields/requirements.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/fields/selector.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/labels/doc.go 
create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/labels/labels.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/labels/selector.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/openapi/common.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/openapi/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/codec.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/error.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/extension.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/helper.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/register.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go create 
mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/types.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/selection/operator.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/types/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/types/nodename.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/types/patch.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/types/uid.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go create mode 100644 
src/prometheus/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/json/json.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/net/http.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/net/interface.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/net/util.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/sets/int.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/sets/string.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go create mode 100644 
src/prometheus/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/version/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/version/types.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/watch/doc.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/watch/filter.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/watch/mux.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/watch/until.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/pkg/watch/watch.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go create mode 100644 src/prometheus/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/LICENSE create mode 100644 src/prometheus/vendor/k8s.io/client-go/discovery/discovery_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/discovery/fake/discovery.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/discovery/helper.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/discovery/restmapper.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/discovery/unstructured.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/clientset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/fake/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/import_known_versions.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/scheme/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/scheme/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go 
create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_autoscaling_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake/fake_horizontalpodautoscaler.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go create mode 
100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/field_constants.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/helpers.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/api/install/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/json.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/ref.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/resource_helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/types.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/defaults.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/generate.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/meta.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/ref.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go create mode 
100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go create mode 
100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/register.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/util/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/util/template.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/util/umask.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/util/umask_windows.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/util/util.go create 
mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/version/base.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/version/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/pkg/version/version.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/rest/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/client.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/config.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/plugin.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/request.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/transport.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/url_utils.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/urlbackoff.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/versions.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/watch/decoder.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/rest/watch/encoder.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/testing/actions.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/testing/fake.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/testing/fixture.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/tools/cache/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/controller.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/delta_fifo.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/expiration_cache.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/fifo.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/tools/cache/index.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/listers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/listwatch.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/mutation_detector.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/reflector.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/shared_informer.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/tools/cache/store.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/cache/undelta_store.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/clientcmd/api/register.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/clientcmd/api/types.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/tools/metrics/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/tools/metrics/metrics.go create mode 100755 src/prometheus/vendor/k8s.io/client-go/transport/OWNERS create mode 100644 src/prometheus/vendor/k8s.io/client-go/transport/cache.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/transport/config.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/transport/round_trippers.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/transport/transport.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/cert/cert.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/cert/csr.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/cert/io.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/cert/pem.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/clock/clock.go create mode 100644 
src/prometheus/vendor/k8s.io/client-go/util/flowcontrol/backoff.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/flowcontrol/throttle.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/integer/integer.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/doc.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/metrics.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/parallelizer.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/queue.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go create mode 100644 src/prometheus/vendor/k8s.io/client-go/util/workqueue/timed_queue.go create mode 100644 src/prometheus/vendor/vendor.json create mode 100644 src/prometheus/web/api/v1/api.go create mode 100644 src/prometheus/web/api/v1/api_test.go create mode 100644 src/prometheus/web/api/v2/api.go create mode 100644 src/prometheus/web/federate.go create mode 100644 src/prometheus/web/federate_test.go create mode 100644 src/prometheus/web/ui/README.md create mode 100644 src/prometheus/web/ui/bindata.go create mode 100644 src/prometheus/web/ui/static/css/alerts.css create mode 100644 src/prometheus/web/ui/static/css/graph.css create mode 100644 src/prometheus/web/ui/static/css/prom_console.css create mode 100644 src/prometheus/web/ui/static/css/prometheus.css create mode 100644 src/prometheus/web/ui/static/css/rules.css create mode 100644 src/prometheus/web/ui/static/css/targets.css create mode 100644 src/prometheus/web/ui/static/img/ajax-loader.gif create mode 100644 src/prometheus/web/ui/static/img/favicon.ico create mode 100644 src/prometheus/web/ui/static/js/alerts.js create mode 100644 
src/prometheus/web/ui/static/js/graph/graph_template.handlebar create mode 100644 src/prometheus/web/ui/static/js/graph/index.js create mode 100644 src/prometheus/web/ui/static/js/prom_console.js create mode 100644 src/prometheus/web/ui/static/js/targets.js create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/css/bootstrap-theme.css create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/css/bootstrap-theme.css.map create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/css/bootstrap-theme.min.css create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/css/bootstrap.css create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/css/bootstrap.css.map create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/css/bootstrap.min.css create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/fonts/glyphicons-halflings-regular.eot create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/fonts/glyphicons-halflings-regular.svg create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/fonts/glyphicons-halflings-regular.ttf create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/fonts/glyphicons-halflings-regular.woff create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/js/bootstrap.js create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/js/bootstrap.min.js create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap-3.3.1/js/npm.js create mode 100644 src/prometheus/web/ui/static/vendor/bootstrap3-typeahead/bootstrap3-typeahead.min.js create mode 100644 src/prometheus/web/ui/static/vendor/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.css create mode 100644 src/prometheus/web/ui/static/vendor/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.js create mode 100644 src/prometheus/web/ui/static/vendor/fuzzy/fuzzy.js create mode 100644 
src/prometheus/web/ui/static/vendor/js/jquery.hotkeys.js create mode 100644 src/prometheus/web/ui/static/vendor/js/jquery.min.js create mode 100644 src/prometheus/web/ui/static/vendor/js/jquery.selection.js create mode 100644 src/prometheus/web/ui/static/vendor/moment/moment-timezone-with-data.min.js create mode 100644 src/prometheus/web/ui/static/vendor/moment/moment.min.js create mode 100644 src/prometheus/web/ui/static/vendor/mustache/mustache.min.js create mode 100644 src/prometheus/web/ui/static/vendor/rickshaw/rickshaw.min.css create mode 100644 src/prometheus/web/ui/static/vendor/rickshaw/rickshaw.min.js create mode 100644 src/prometheus/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js create mode 100644 src/prometheus/web/ui/static/vendor/rickshaw/vendor/d3.v3.js create mode 100644 src/prometheus/web/ui/templates/_base.html create mode 100644 src/prometheus/web/ui/templates/alerts.html create mode 100644 src/prometheus/web/ui/templates/config.html create mode 100644 src/prometheus/web/ui/templates/flags.html create mode 100644 src/prometheus/web/ui/templates/graph.html create mode 100644 src/prometheus/web/ui/templates/rules.html create mode 100644 src/prometheus/web/ui/templates/service-discovery.html create mode 100644 src/prometheus/web/ui/templates/status.html create mode 100644 src/prometheus/web/ui/templates/targets.html create mode 100644 src/prometheus/web/web.go create mode 100644 src/prometheus/web/web_test.go diff --git a/src/prometheus/.circleci/config.yml b/src/prometheus/.circleci/config.yml new file mode 100644 index 0000000..cfb7bad --- /dev/null +++ b/src/prometheus/.circleci/config.yml @@ -0,0 +1,119 @@ +--- +version: 2 + +jobs: + test: + docker: + - image: circleci/golang:1.10 + working_directory: /go/src/github.com/prometheus/prometheus + resource_class: large + + steps: + - checkout + - run: make promu + - run: make check_license style unused staticcheck build + - run: rm -v prometheus + + build: + machine: true + 
working_directory: /home/circleci/.go_workspace/src/github.com/prometheus/prometheus + + steps: + - checkout + - run: make promu + - run: promu crossbuild -v + - persist_to_workspace: + root: . + paths: + - .build + + docker_hub_master: + docker: + - image: circleci/golang:1.10 + working_directory: /go/src/github.com/prometheus/prometheus + + environment: + DOCKER_IMAGE_NAME: prom/prometheus + QUAY_IMAGE_NAME: quay.io/prometheus/prometheus + + steps: + - checkout + - setup_remote_docker + - attach_workspace: + at: . + - run: ln -s .build/linux-amd64/prometheus prometheus + - run: ln -s .build/linux-amd64/promtool promtool + - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME + - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME + - run: docker images + - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD + - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io + - run: docker push $DOCKER_IMAGE_NAME + - run: docker push $QUAY_IMAGE_NAME + + docker_hub_release_tags: + docker: + - image: circleci/golang:1.10 + working_directory: /go/src/github.com/prometheus/prometheus + + environment: + DOCKER_IMAGE_NAME: prom/prometheus + QUAY_IMAGE_NAME: quay.io/prometheus/prometheus + + steps: + - checkout + - setup_remote_docker + - run: mkdir -v -p ${HOME}/bin + - run: curl -L 'https://github.com/aktau/github-release/releases/download/v0.7.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C ${HOME}/bin + - run: echo 'export PATH=${HOME}/bin:${PATH}' >> ${BASH_ENV} + - attach_workspace: + at: . 
+ - run: make promu + - run: promu crossbuild tarballs + - run: promu checksum .tarballs + - run: promu release .tarballs + - store_artifacts: + path: .tarballs + destination: releases + - run: ln -s .build/linux-amd64/prometheus prometheus + - run: ln -s .build/linux-amd64/promtool promtool + - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG + - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG + - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD + - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io + - run: | + if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then + docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest" + docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest" + fi + - run: docker push $DOCKER_IMAGE_NAME + - run: docker push $QUAY_IMAGE_NAME + +workflows: + version: 2 + prometheus: + jobs: + - test: + filters: + tags: + only: /.*/ + - build: + filters: + tags: + only: /.*/ + - docker_hub_master: + requires: + - test + - build + filters: + branches: + only: master + - docker_hub_release_tags: + requires: + - test + - build + filters: + tags: + only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ + branches: + ignore: /.*/ diff --git a/src/prometheus/.dockerignore b/src/prometheus/.dockerignore new file mode 100644 index 0000000..a4d092b --- /dev/null +++ b/src/prometheus/.dockerignore @@ -0,0 +1,5 @@ +data/ +.build/ +.tarballs/ + +!.build/linux-amd64/ diff --git a/src/prometheus/.github/ISSUE_TEMPLATE.md b/src/prometheus/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..288a1cb --- /dev/null +++ b/src/prometheus/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,55 @@ + +## Proposal +**Use case. Why is this important?** + +*Nice to have' is not a good use case :)* + +## Bug Report +**What did you do?** + +**What did you expect to see?** + +**What did you see instead? 
Under which circumstances?** + +**Environment** + +* System information: + + insert output of `uname -srm` here + +* Prometheus version: + + insert output of `prometheus --version` here + +* Alertmanager version: + + insert output of `alertmanager --version` here (if relevant to the issue) + +* Prometheus configuration file: +``` +insert configuration here +``` + +* Alertmanager configuration file: +``` +insert configuration here (if relevant to the issue) +``` + + + + +* Logs: +``` +insert Prometheus and Alertmanager logs relevant to the issue here +``` diff --git a/src/prometheus/.gitignore b/src/prometheus/.gitignore new file mode 100644 index 0000000..05ffbb9 --- /dev/null +++ b/src/prometheus/.gitignore @@ -0,0 +1,23 @@ +*# +.#* +*-stamp +/*.yaml +/*.yml +/*.rules +*.exe + +/prometheus +/promtool +benchmark.txt +/data +/cmd/prometheus/data +/cmd/prometheus/debug +/.build +/.release +/.tarballs + +!/circle.yml +!/.travis.yml +!/.promu.yml +/documentation/examples/remote_storage/remote_storage_adapter/remote_storage_adapter +/documentation/examples/remote_storage/example_write_adapter/example_writer_adapter diff --git a/src/prometheus/.promu.yml b/src/prometheus/.promu.yml new file mode 100644 index 0000000..27aa3aa --- /dev/null +++ b/src/prometheus/.promu.yml @@ -0,0 +1,49 @@ +repository: + path: github.com/prometheus/prometheus +build: + binaries: + - name: prometheus + path: ./cmd/prometheus + - name: promtool + path: ./cmd/promtool + flags: -a -tags netgo + ldflags: | + -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}} + -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}} + -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}} + -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}} + -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} +tarball: + files: + - consoles + - console_libraries + - 
documentation/examples/prometheus.yml + - LICENSE + - NOTICE +crossbuild: + platforms: + - linux/amd64 + - linux/386 + - darwin/amd64 + - darwin/386 + - windows/amd64 + - windows/386 + - freebsd/amd64 + - freebsd/386 + - openbsd/amd64 + - openbsd/386 + - netbsd/amd64 + - netbsd/386 + - dragonfly/amd64 + - linux/arm + - linux/arm64 + - freebsd/arm + # Temporarily deactivated as golang.org/x/sys does not have syscalls + # implemented for that os/platform combination. + #- openbsd/arm + #- linux/mips64 + #- linux/mips64le + - netbsd/arm + - linux/ppc64 + - linux/ppc64le + diff --git a/src/prometheus/.travis.yml b/src/prometheus/.travis.yml new file mode 100644 index 0000000..82c5f11 --- /dev/null +++ b/src/prometheus/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: +- 1.10.x +- 1.x + +go_import_path: github.com/prometheus/prometheus + +script: +- make check_license style unused test staticcheck diff --git a/src/prometheus/CHANGELOG.md b/src/prometheus/CHANGELOG.md new file mode 100644 index 0000000..11699d0 --- /dev/null +++ b/src/prometheus/CHANGELOG.md @@ -0,0 +1,1120 @@ +## 2.3.1 / 2018-06-19 + +* [BUGFIX] Avoid infinite loop on duplicate NaN values. #4275 +* [BUGFIX] Fix nil pointer deference when using various API endpoints #4282 +* [BUGFIX] config: set target group source index during unmarshalling #4245 +* [BUGFIX] discovery/file: fix logging #4178 +* [BUGFIX] kubernetes_sd: fix namespace filtering #4285 +* [BUGFIX] web: restore old path prefix behavior #4273 +* [BUGFIX] web: remove security headers added in 2.3.0 #4259 + +## 2.3.0 / 2018-06-05 + +* [CHANGE] `marathon_sd`: use `auth_token` and `auth_token_file` for token-based authentication instead of `bearer_token` and `bearer_token_file` respectively. 
+* [CHANGE] Metric names for HTTP server metrics changed +* [FEATURE] Add query commands to promtool +* [FEATURE] Add security headers to HTTP server responses +* [FEATURE] Pass query hints via remote read API +* [FEATURE] Basic auth passwords can now be configured via file across all configuration +* [ENHANCEMENT] Optimise PromQL and API serialization for memory usage and allocations +* [ENHANCEMENT] Limit number of dropped targets in web UI +* [ENHANCEMENT] Consul and EC2 service discovery allow using server-side filtering for performance improvement +* [ENHANCEMENT] Add advanced filtering configuration to EC2 service discovery +* [ENHANCEMENT] `marathon_sd`: adds support for basic and bearer authentication, plus all other common HTTP client options (TLS config, proxy URL, etc.) +* [ENHANCEMENT] Provide machine type metadata and labels in GCE service discovery +* [ENHANCEMENT] Add pod controller kind and name to Kubernetes service discovery data +* [ENHANCEMENT] Move TSDB to flock-based log file that works with Docker containers +* [BUGFIX] Properly propagate storage errors in PromQL +* [BUGFIX] Fix path prefix for web pages +* [BUGFIX] Fix goroutine leak in Consul service discovery +* [BUGFIX] Fix races in scrape manager +* [BUGFIX] Fix OOM for very large k in PromQL topk() queries +* [BUGFIX] Make remote write more resilient to unavailable receivers +* [BUGFIX] Make remote write shutdown cleanly +* [BUGFIX] Don't leak files on errors in TSDB's tombstone cleanup +* [BUGFIX] Unary minus expressions now remove the metric name from results +* [BUGFIX] Fix bug that led to the wrong number of samples considered for time range expressions + +## 2.2.1 / 2018-03-13 + +* [BUGFIX] Fix data loss in TSDB on compaction +* [BUGFIX] Correctly stop timer in remote-write path +* [BUGFIX] Fix deadlock triggered by loading targets page +* [BUGFIX] Fix incorrect buffering of samples on range selection queries +* [BUGFIX] Handle large index files on Windows properly + +## 2.2.0 / 
2018-03-08 + +* [CHANGE] Rename file SD mtime metric. +* [CHANGE] Send target update on empty pod IP in Kubernetes SD. +* [FEATURE] Add API endpoint for flags. +* [FEATURE] Add API endpoint for dropped targets. +* [FEATURE] Display annotations on alerts page. +* [FEATURE] Add option to skip head data when taking snapshots. +* [ENHANCEMENT] Federation performance improvement. +* [ENHANCEMENT] Read bearer token file on every scrape. +* [ENHANCEMENT] Improve typeahead on `/graph` page. +* [ENHANCEMENT] Change rule file formatting. +* [ENHANCEMENT] Set consul server default to `localhost:8500`. +* [ENHANCEMENT] Add dropped Alertmanagers to API info endpoint. +* [ENHANCEMENT] Add OS type meta label to Azure SD. +* [ENHANCEMENT] Validate required fields in SD configuration. +* [BUGFIX] Prevent stack overflow on deep recursion in TSDB. +* [BUGFIX] Correctly read offsets in index files that are greater than 4GB. +* [BUGFIX] Fix scraping behavior for empty labels. +* [BUGFIX] Drop metric name for bool modifier. +* [BUGFIX] Fix races in discovery. +* [BUGFIX] Fix Kubernetes endpoints SD for empty subsets. +* [BUGFIX] Throttle updates from SD providers, which caused increased CPU usage and allocations. +* [BUGFIX] Fix TSDB block reload issue. +* [BUGFIX] Fix PromQL printing of empty `without()`. +* [BUGFIX] Don't reset FiredAt for inactive alerts. +* [BUGFIX] Fix erroneous file version changes and repair existing data. + +## 2.1.0 / 2018-01-19 + +* [FEATURE] New Service Discovery UI showing labels before and after relabelling. +* [FEATURE] New Admin APIs added to v1 to delete, snapshot and remove tombstones. +* [ENHANCEMENT] The graph UI autocomplete now includes your previous queries. +* [ENHANCEMENT] Federation is now much faster for large numbers of series. +* [ENHANCEMENT] Added new metrics to measure rule timings. +* [ENHANCEMENT] Rule evaluation times added to the rules UI. +* [ENHANCEMENT] Added metrics to measure modified time of file SD files. 
+* [ENHANCEMENT] Kubernetes SD now includes POD UID in discovery metadata. +* [ENHANCEMENT] The Query APIs now return optional stats on query execution times. +* [ENHANCEMENT] The index now no longer has the 4GiB size limit and is also smaller. +* [BUGFIX] Remote read `read_recent` option is now false by default. +* [BUGFIX] Pass the right configuration to each Alertmanager (AM) when using multiple AM configs. +* [BUGFIX] Fix not-matchers not selecting series with labels unset. +* [BUGFIX] tsdb: Fix occasional panic in head block. +* [BUGFIX] tsdb: Close files before deletion to fix retention issues on Windows and NFS. +* [BUGFIX] tsdb: Cleanup and do not retry failing compactions. +* [BUGFIX] tsdb: Close WAL while shutting down. + + +## 2.0.0 / 2017-11-08 + +This release includes a completely rewritten storage, huge performance +improvements, but also many backwards incompatible changes. For more +information, read the announcement blog post and migration guide. + +https://prometheus.io/blog/2017/11/08/announcing-prometheus-2-0/ +https://prometheus.io/docs/prometheus/2.0/migration/ + +* [CHANGE] Completely rewritten storage layer, with WAL. This is not backwards compatible with 1.x storage, and many flags have changed/disappeared. +* [CHANGE] New staleness behavior. Series now marked stale after target scrapes no longer return them, and soon after targets disappear from service discovery. +* [CHANGE] Rules files use YAML syntax now. Conversion tool added to promtool. +* [CHANGE] Removed `count_scalar`, `drop_common_labels` functions and `keep_common` modifier from PromQL. +* [CHANGE] Rewritten exposition format parser with much higher performance. The Protobuf exposition format is no longer supported. +* [CHANGE] Example console templates updated for new storage and metrics names. Examples other than node exporter and Prometheus removed. 
+* [CHANGE] Admin and lifecycle APIs now disabled by default, can be re-enabled via flags +* [CHANGE] Flags switched to using Kingpin, all flags are now --flagname rather than -flagname. +* [FEATURE/CHANGE] Remote read can be configured to not read data which is available locally. This is enabled by default. +* [FEATURE] Rules can be grouped now. Rules within a rule group are executed sequentially. +* [FEATURE] Added experimental GRPC apis +* [FEATURE] Add timestamp() function to PromQL. +* [ENHANCEMENT] Remove remote read from the query path if no remote storage is configured. +* [ENHANCEMENT] Bump Consul HTTP client timeout to not match the Consul SD watch timeout. +* [ENHANCEMENT] Go-conntrack added to provide HTTP connection metrics. +* [BUGFIX] Fix connection leak in Consul SD. + +## 1.8.2 / 2017-11-04 + +* [BUGFIX] EC2 service discovery: Do not crash if tags are empty. + +## 1.8.1 / 2017-10-19 + +* [BUGFIX] Correctly handle external labels on remote read endpoint + +## 1.8.0 / 2017-10-06 + +* [CHANGE] Rule links link to the _Console_ tab rather than the _Graph_ tab to + not trigger expensive range queries by default. +* [FEATURE] Ability to act as a remote read endpoint for other Prometheus + servers. +* [FEATURE] K8s SD: Support discovery of ingresses. +* [FEATURE] Consul SD: Support for node metadata. +* [FEATURE] Openstack SD: Support discovery of hypervisors. +* [FEATURE] Expose current Prometheus config via `/status/config`. +* [FEATURE] Allow to collapse jobs on `/targets` page. +* [FEATURE] Add `/-/healthy` and `/-/ready` endpoints. +* [FEATURE] Add color scheme support to console templates. +* [ENHANCEMENT] Remote storage connections use HTTP keep-alive. +* [ENHANCEMENT] Improved logging about remote storage. +* [ENHANCEMENT] Relaxed URL validation. +* [ENHANCEMENT] Openstack SD: Handle instances without IP. +* [ENHANCEMENT] Make remote storage queue manager configurable. +* [ENHANCEMENT] Validate metrics returned from remote read. 
+* [ENHANCEMENT] EC2 SD: Set a default region. +* [ENHANCEMENT] Changed help link to `https://prometheus.io/docs`. +* [BUGFIX] Fix floating-point precision issue in `deriv` function. +* [BUGFIX] Fix pprof endpoints when -web.route-prefix or -web.external-url is + used. +* [BUGFIX] Fix handling of `null` target groups in file-based SD. +* [BUGFIX] Set the sample timestamp in date-related PromQL functions. +* [BUGFIX] Apply path prefix to redirect from deprecated graph URL. +* [BUGFIX] Fixed tests on MS Windows. +* [BUGFIX] Check for invalid UTF-8 in label values after relabeling. + +## 1.7.2 / 2017-09-26 + +* [BUGFIX] Correctly remove all targets from DNS service discovery if the + corresponding DNS query succeeds and returns an empty result. +* [BUGFIX] Correctly parse resolution input in expression browser. +* [BUGFIX] Consistently use UTC in the date picker of the expression browser. +* [BUGFIX] Correctly handle multiple ports in Marathon service discovery. +* [BUGFIX] Fix HTML escaping so that HTML templates compile with Go1.9. +* [BUGFIX] Prevent number of remote write shards from going negative. +* [BUGFIX] In the graphs created by the expression browser, render very large + and small numbers in a readable way. +* [BUGFIX] Fix a rarely occurring iterator issue in varbit encoded chunks. + +## 1.7.1 / 2017-06-12 + +* [BUGFIX] Fix double prefix redirect. + +## 1.7.0 / 2017-06-06 + +* [CHANGE] Compress remote storage requests and responses with unframed/raw snappy. +* [CHANGE] Properly elide secrets in config. +* [FEATURE] Add OpenStack service discovery. +* [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces. +* [FEATURE] Add metric for discovered number of Alertmanagers. +* [ENHANCEMENT] Print system information (uname) on startup. +* [ENHANCEMENT] Show gaps in graphs on expression browser. +* [ENHANCEMENT] Promtool linter checks counter naming and more reserved labels. +* [BUGFIX] Fix broken Mesos discovery. 
+* [BUGFIX] Fix redirect when external URL is set. +* [BUGFIX] Fix mutation of active alert elements by notifier. +* [BUGFIX] Fix HTTP error handling for remote write. +* [BUGFIX] Fix builds for Solaris/Illumos. +* [BUGFIX] Fix overflow checking in global config. +* [BUGFIX] Fix log level reporting issue. +* [BUGFIX] Fix ZooKeeper serverset discovery that can become out-of-sync. + +## 1.6.3 / 2017-05-18 + +* [BUGFIX] Fix disappearing Alertmanager targets in Alertmanager discovery. +* [BUGFIX] Fix panic with remote_write on ARMv7. +* [BUGFIX] Fix stacked graphs to adapt min/max values. + +## 1.6.2 / 2017-05-11 + +* [BUGFIX] Fix potential memory leak in Kubernetes service discovery + +## 1.6.1 / 2017-04-19 + +* [BUGFIX] Don't panic if storage has no FPs even after initial wait + +## 1.6.0 / 2017-04-14 + +* [CHANGE] Replaced the remote write implementations for various backends by a + generic write interface with example adapter implementation for various + backends. Note that both the previous and the current remote write + implementations are **experimental**. +* [FEATURE] New flag `-storage.local.target-heap-size` to tell Prometheus about + the desired heap size. This deprecates the flags + `-storage.local.memory-chunks` and `-storage.local.max-chunks-to-persist`, + which are kept for backward compatibility. +* [FEATURE] Add `check-metrics` to `promtool` to lint metric names. +* [FEATURE] Add Joyent Triton discovery. +* [FEATURE] `X-Prometheus-Scrape-Timeout-Seconds` header in HTTP scrape + requests. +* [FEATURE] Remote read interface, including example for InfluxDB. **Experimental.** +* [FEATURE] Enable Consul SD to connect via TLS. +* [FEATURE] Marathon SD supports multiple ports. +* [FEATURE] Marathon SD supports bearer token for authentication. +* [FEATURE] Custom timeout for queries. +* [FEATURE] Expose `buildQueryUrl` in `graph.js`. +* [FEATURE] Add `rickshawGraph` property to the graph object in console + templates. 
+* [FEATURE] New metrics exported by Prometheus itself: + * Summary `prometheus_engine_query_duration_seconds` + * Counter `prometheus_evaluator_iterations_missed_total` + * Counter `prometheus_evaluator_iterations_total` + * Gauge `prometheus_local_storage_open_head_chunks` + * Gauge `prometheus_local_storage_target_heap_size` +* [ENHANCEMENT] Reduce shut-down time by interrupting an ongoing checkpoint + before starting the final checkpoint. +* [ENHANCEMENT] Auto-tweak times between checkpoints to limit time spent in + checkpointing to 50%. +* [ENHANCEMENT] Improved crash recovery deals better with certain index + corruptions. +* [ENHANCEMENT] Graphing deals better with constant time series. +* [ENHANCEMENT] Retry remote writes on recoverable errors. +* [ENHANCEMENT] Evict unused chunk descriptors during crash recovery to limit + memory usage. +* [ENHANCEMENT] Smoother disk usage during series maintenance. +* [ENHANCEMENT] Targets on targets page sorted by instance within a job. +* [ENHANCEMENT] Sort labels in federation. +* [ENHANCEMENT] Set `GOGC=40` by default, which results in much better memory + utilization at the price of slightly higher CPU usage. If `GOGC` is set by + the user, it is still honored as usual. +* [ENHANCEMENT] Close head chunks after being idle for the duration of the + configured staleness delta. This helps to persist and evict head chunk of + stale series more quickly. +* [ENHANCEMENT] Stricter checking of relabel config. +* [ENHANCEMENT] Cache busters for static web content. +* [ENHANCEMENT] Send Prometheus-specific user-agent header during scrapes. +* [ENHANCEMENT] Improved performance of series retention cut-off. +* [ENHANCEMENT] Mitigate impact of non-atomic sample ingestion on + `histogram_quantile` by enforcing buckets to be monotonic. +* [ENHANCEMENT] Released binaries built with Go 1.8.1. +* [BUGFIX] Send `instance=""` with federation if `instance` not set. 
+* [BUGFIX] Update to new `client_golang` to get rid of unwanted quantile + metrics in summaries. +* [BUGFIX] Introduce several additional guards against data corruption. +* [BUGFIX] Mark storage dirty and increment + `prometheus_local_storage_persist_errors_total` on all relevant errors. +* [BUGFIX] Propagate storage errors as 500 in the HTTP API. +* [BUGFIX] Fix int64 overflow in timestamps in the HTTP API. +* [BUGFIX] Fix deadlock in Zookeeper SD. +* [BUGFIX] Fix fuzzy search problems in the web-UI auto-completion. + +## 1.5.3 / 2017-05-11 + +* [BUGFIX] Fix potential memory leak in Kubernetes service discovery + +## 1.5.2 / 2017-02-10 + +* [BUGFIX] Fix series corruption in a special case of series maintenance where + the minimum series-file-shrink-ratio kicks in. +* [BUGFIX] Fix two panic conditions both related to processing a series + scheduled to be quarantined. +* [ENHANCEMENT] Binaries built with Go1.7.5. + +## 1.5.1 / 2017-02-07 + +* [BUGFIX] Don't lose fully persisted memory series during checkpointing. +* [BUGFIX] Fix intermittently failing relabeling. +* [BUGFIX] Make `-storage.local.series-file-shrink-ratio` work. +* [BUGFIX] Remove race condition from TestLoop. + +## 1.5.0 / 2017-01-23 + +* [CHANGE] Use lexicographic order to sort alerts by name. +* [FEATURE] Add Joyent Triton discovery. +* [FEATURE] Add scrape targets and alertmanager targets API. +* [FEATURE] Add various persistence related metrics. +* [FEATURE] Add various query engine related metrics. +* [FEATURE] Add ability to limit scrape samples, and related metrics. +* [FEATURE] Add labeldrop and labelkeep relabelling actions. +* [FEATURE] Display current working directory on status-page. +* [ENHANCEMENT] Strictly use ServiceAccount for in cluster configuration on Kubernetes. +* [ENHANCEMENT] Various performance and memory-management improvements. +* [BUGFIX] Fix basic auth for alertmanagers configured via flag. +* [BUGFIX] Don't panic on decoding corrupt data. 
+* [BUGFIX] Ignore dotfiles in data directory. +* [BUGFIX] Abort on intermediate federation errors. + +## 1.4.1 / 2016-11-28 + +* [BUGFIX] Fix Consul service discovery + +## 1.4.0 / 2016-11-25 + +* [FEATURE] Allow configuring Alertmanagers via service discovery +* [FEATURE] Display used Alertmanagers on runtime page in the UI +* [FEATURE] Support profiles in AWS EC2 service discovery configuration +* [ENHANCEMENT] Remove duplicated logging of Kubernetes client errors +* [ENHANCEMENT] Add metrics about Kubernetes service discovery +* [BUGFIX] Update alert annotations on re-evaluation +* [BUGFIX] Fix export of group modifier in PromQL queries +* [BUGFIX] Remove potential deadlocks in several service discovery implementations +* [BUGFIX] Use proper float64 modulo in PromQL `%` binary operations +* [BUGFIX] Fix crash bug in Kubernetes service discovery + +## 1.3.1 / 2016-11-04 + +This bug-fix release pulls in the fixes from the 1.2.3 release. + +* [BUGFIX] Correctly handle empty Regex entry in relabel config. +* [BUGFIX] MOD (`%`) operator doesn't panic with small floating point numbers. +* [BUGFIX] Updated miekg/dns vendoring to pick up upstream bug fixes. +* [ENHANCEMENT] Improved DNS error reporting. + +## 1.2.3 / 2016-11-04 + +Note that this release is chronologically after 1.3.0. + +* [BUGFIX] Correctly handle end time before start time in range queries. +* [BUGFIX] Error on negative `-storage.staleness-delta` +* [BUGFIX] Correctly handle empty Regex entry in relabel config. +* [BUGFIX] MOD (`%`) operator doesn't panic with small floating point numbers. +* [BUGFIX] Updated miekg/dns vendoring to pick up upstream bug fixes. +* [ENHANCEMENT] Improved DNS error reporting. + +## 1.3.0 / 2016-11-01 + +This is a breaking change to the Kubernetes service discovery. + +* [CHANGE] Rework Kubernetes SD. +* [FEATURE] Add support for interpolating `target_label`. +* [FEATURE] Add GCE metadata as Prometheus meta labels. +* [ENHANCEMENT] Add EC2 SD metrics. 
+* [ENHANCEMENT] Add Azure SD metrics. +* [ENHANCEMENT] Add fuzzy search to `/graph` textarea. +* [ENHANCEMENT] Always show instance labels on target page. +* [BUGFIX] Validate query end time is not before start time. +* [BUGFIX] Error on negative `-storage.staleness-delta` + +## 1.2.2 / 2016-10-30 + +* [BUGFIX] Correctly handle on() in alerts. +* [BUGFIX] UI: Deal properly with aborted requests. +* [BUGFIX] UI: Decode URL query parameters properly. +* [BUGFIX] Storage: Deal better with data corruption (non-monotonic timestamps). +* [BUGFIX] Remote storage: Re-add accidentally removed timeout flag. +* [BUGFIX] Updated a number of vendored packages to pick up upstream bug fixes. + +## 1.2.1 / 2016-10-10 + +* [BUGFIX] Count chunk evictions properly so that the server doesn't + assume it runs out of memory and subsequently throttles ingestion. +* [BUGFIX] Use Go1.7.1 for prebuilt binaries to fix issues on MacOS Sierra. + +## 1.2.0 / 2016-10-07 + +* [FEATURE] Cleaner encoding of query parameters in `/graph` URLs. +* [FEATURE] PromQL: Add `minute()` function. +* [FEATURE] Add GCE service discovery. +* [FEATURE] Allow any valid UTF-8 string as job name. +* [FEATURE] Allow disabling local storage. +* [FEATURE] EC2 service discovery: Expose `ec2_instance_state`. +* [ENHANCEMENT] Various performance improvements in local storage. +* [BUGFIX] Zookeeper service discovery: Remove deleted nodes. +* [BUGFIX] Zookeeper service discovery: Resync state after Zookeeper failure. +* [BUGFIX] Remove JSON from HTTP Accept header. +* [BUGFIX] Fix flag validation of Alertmanager URL. +* [BUGFIX] Fix race condition on shutdown. +* [BUGFIX] Do not fail Consul discovery on Prometheus startup when Consul + is down. +* [BUGFIX] Handle NaN in `changes()` correctly. +* [CHANGE] **Experimental** remote write path: Remove use of gRPC. +* [CHANGE] **Experimental** remote write path: Configuration via config file + rather than command line flags. 
+* [FEATURE] **Experimental** remote write path: Add HTTP basic auth and TLS. +* [FEATURE] **Experimental** remote write path: Support for relabelling. + +## 1.1.3 / 2016-09-16 + +* [ENHANCEMENT] Use golang-builder base image for tests in CircleCI. +* [ENHANCEMENT] Added unit tests for federation. +* [BUGFIX] Correctly de-dup metric families in federation output. + +## 1.1.2 / 2016-09-08 + +* [BUGFIX] Allow label names that coincide with keywords. + +## 1.1.1 / 2016-09-07 + +* [BUGFIX] Fix IPv6 escaping in service discovery integrations +* [BUGFIX] Fix default scrape port assignment for IPv6 + +## 1.1.0 / 2016-09-03 + +* [FEATURE] Add `quantile` and `quantile_over_time`. +* [FEATURE] Add `stddev_over_time` and `stdvar_over_time`. +* [FEATURE] Add various time and date functions. +* [FEATURE] Added `toUpper` and `toLower` formatting to templates. +* [FEATURE] Allow relabeling of alerts. +* [FEATURE] Allow URLs in targets defined via a JSON file. +* [FEATURE] Add idelta function. +* [FEATURE] 'Remove graph' button on the /graph page. +* [FEATURE] Kubernetes SD: Add node name and host IP to pod discovery. +* [FEATURE] New remote storage write path. EXPERIMENTAL! +* [ENHANCEMENT] Improve time-series index lookups. +* [ENHANCEMENT] Forbid invalid relabel configurations. +* [ENHANCEMENT] Improved various tests. +* [ENHANCEMENT] Add crash recovery metric 'started_dirty'. +* [ENHANCEMENT] Fix (and simplify) populating series iterators. +* [ENHANCEMENT] Add job link on target page. +* [ENHANCEMENT] Message on empty Alerts page. +* [ENHANCEMENT] Various internal code refactorings and clean-ups. +* [ENHANCEMENT] Various improvements in the build system. +* [BUGFIX] Catch errors when unmarshalling delta/doubleDelta encoded chunks. +* [BUGFIX] Fix data race in lexer and lexer test. +* [BUGFIX] Trim stray whitespace from bearer token file. +* [BUGFIX] Avoid divide-by-zero panic on query_range?step=0. +* [BUGFIX] Detect invalid rule files at startup. 
+* [BUGFIX] Fix counter reset treatment in PromQL. +* [BUGFIX] Fix rule HTML escaping issues. +* [BUGFIX] Remove internal labels from alerts sent to AM. + +## 1.0.2 / 2016-08-24 + +* [BUGFIX] Clean up old targets after config reload. + +## 1.0.1 / 2016-07-21 + +* [BUGFIX] Exit with error on non-flag command-line arguments. +* [BUGFIX] Update example console templates to new HTTP API. +* [BUGFIX] Re-add logging flags. + +## 1.0.0 / 2016-07-18 + +* [CHANGE] Remove deprecated query language keywords +* [CHANGE] Change Kubernetes SD to require specifying Kubernetes role +* [CHANGE] Use service address in Consul SD if available +* [CHANGE] Standardize all Prometheus internal metrics to second units +* [CHANGE] Remove unversioned legacy HTTP API +* [CHANGE] Remove legacy ingestion of JSON metric format +* [CHANGE] Remove deprecated `target_groups` configuration +* [FEATURE] Add binary power operation to PromQL +* [FEATURE] Add `count_values` aggregator +* [FEATURE] Add `-web.route-prefix` flag +* [FEATURE] Allow `on()`, `by()`, `without()` in PromQL with empty label sets +* [ENHANCEMENT] Make `topk/bottomk` query functions aggregators +* [BUGFIX] Fix annotations in alert rule printing +* [BUGFIX] Expand alert templating at evaluation time +* [BUGFIX] Fix edge case handling in crash recovery +* [BUGFIX] Hide testing package flags from help output + +## 0.20.0 / 2016-06-15 + +This release contains multiple breaking changes to the configuration schema. 
+ +* [FEATURE] Allow configuring multiple Alertmanagers +* [FEATURE] Add server name to TLS configuration +* [FEATURE] Add labels for all node addresses and discover node port if available in Kubernetes SD +* [ENHANCEMENT] More meaningful configuration errors +* [ENHANCEMENT] Round scraping timestamps to milliseconds in web UI +* [ENHANCEMENT] Make number of storage fingerprint locks configurable +* [BUGFIX] Fix date parsing in console template graphs +* [BUGFIX] Fix static console files in Docker images +* [BUGFIX] Fix console JS XHR requests for IE11 +* [BUGFIX] Add missing path prefix in new status page +* [CHANGE] Rename `target_groups` to `static_configs` in config files +* [CHANGE] Rename `names` to `files` in file SD configuration +* [CHANGE] Remove kubelet port config option in Kubernetes SD configuration + +## 0.19.3 / 2016-06-14 + +* [BUGFIX] Handle Marathon apps with zero ports +* [BUGFIX] Fix startup panic in retrieval layer + +## 0.19.2 / 2016-05-29 + +* [BUGFIX] Correctly handle `GROUP_LEFT` and `GROUP_RIGHT` without labels in + string representation of expressions and in rules. +* [BUGFIX] Use `-web.external-url` for new status endpoints. + +## 0.19.1 / 2016-05-25 + +* [BUGFIX] Handle service discovery panic affecting Kubernetes SD +* [BUGFIX] Fix web UI display issue in some browsers + +## 0.19.0 / 2016-05-24 + +This version contains a breaking change to the query language. 
Please read +the documentation on the grouping behavior of vector matching: + +https://prometheus.io/docs/querying/operators/#vector-matching + +* [FEATURE] Add experimental Microsoft Azure service discovery +* [FEATURE] Add `ignoring` modifier for binary operations +* [FEATURE] Add pod discovery to Kubernetes service discovery +* [CHANGE] Vector matching takes grouping labels from one-side +* [ENHANCEMENT] Support time range on /api/v1/series endpoint +* [ENHANCEMENT] Partition status page into individual pages +* [BUGFIX] Fix issue of hanging target scrapes + +## 0.18.0 / 2016-04-18 + +* [BUGFIX] Fix operator precedence in PromQL +* [BUGFIX] Never drop still open head chunk +* [BUGFIX] Fix missing 'keep_common' when printing AST node +* [CHANGE/BUGFIX] Target identity considers path and parameters additionally to host and port +* [CHANGE] Rename metric `prometheus_local_storage_invalid_preload_requests_total` to `prometheus_local_storage_non_existent_series_matches_total` +* [CHANGE] Support for old alerting rule syntax dropped +* [FEATURE] Deduplicate targets within the same scrape job +* [FEATURE] Add varbit chunk encoding (higher compression, more CPU usage – disabled by default) +* [FEATURE] Add `holt_winters` query function +* [FEATURE] Add relative complement `unless` operator to PromQL +* [ENHANCEMENT] Quarantine series file if data corruption is encountered (instead of crashing) +* [ENHANCEMENT] Validate Alertmanager URL +* [ENHANCEMENT] Use UTC for build timestamp +* [ENHANCEMENT] Improve index query performance (especially for active time series) +* [ENHANCEMENT] Instrument configuration reload duration +* [ENHANCEMENT] Instrument retrieval layer +* [ENHANCEMENT] Add Go version to `prometheus_build_info` metric + +## 0.17.0 / 2016-03-02 + +This version no longer works with Alertmanager 0.0.4 and earlier! +The alerting rule syntax has changed as well but the old syntax is supported +up until version 0.18. 
+ +All regular expressions in PromQL are anchored now, matching the behavior of +regular expressions in config files. + +* [CHANGE] Integrate with Alertmanager 0.1.0 and higher +* [CHANGE] Degraded storage mode renamed to rushed mode +* [CHANGE] New alerting rule syntax +* [CHANGE] Add label validation on ingestion +* [CHANGE] Regular expression matchers in PromQL are anchored +* [FEATURE] Add `without` aggregation modifier +* [FEATURE] Send alert resolved notifications to Alertmanager +* [FEATURE] Allow millisecond precision in configuration file +* [FEATURE] Support AirBnB's Smartstack Nerve for service discovery +* [ENHANCEMENT] Storage switches less often between regular and rushed mode. +* [ENHANCEMENT] Storage switches into rushed mode if there are too many memory chunks. +* [ENHANCEMENT] Added more storage instrumentation +* [ENHANCEMENT] Improved instrumentation of notification handler +* [BUGFIX] Do not count head chunks as chunks waiting for persistence +* [BUGFIX] Handle OPTIONS HTTP requests to the API correctly +* [BUGFIX] Parsing of ranges in PromQL fixed +* [BUGFIX] Correctly validate URL flag parameters +* [BUGFIX] Log argument parse errors +* [BUGFIX] Properly handle creation of target with bad TLS config +* [BUGFIX] Fix of checkpoint timing issue + +## 0.16.2 / 2016-01-18 + +* [FEATURE] Multiple authentication options for EC2 discovery added +* [FEATURE] Several meta labels for EC2 discovery added +* [FEATURE] Allow full URLs in static target groups (used e.g. 
by the `blackbox_exporter`) +* [FEATURE] Add Graphite remote-storage integration +* [FEATURE] Create separate Kubernetes targets for services and their endpoints +* [FEATURE] Add `clamp_{min,max}` functions to PromQL +* [FEATURE] Omitted time parameter in API query defaults to now +* [ENHANCEMENT] Less frequent time series file truncation +* [ENHANCEMENT] Instrument number of manually deleted time series +* [ENHANCEMENT] Ignore lost+found directory during storage version detection +* [CHANGE] Kubernetes `masters` renamed to `api_servers` +* [CHANGE] "Healthy" and "unhealthy" targets are now called "up" and "down" in the web UI +* [CHANGE] Remove undocumented 2nd argument of the `delta` function. + (This is a BREAKING CHANGE for users of the undocumented 2nd argument.) +* [BUGFIX] Return proper HTTP status codes on API errors +* [BUGFIX] Fix Kubernetes authentication configuration +* [BUGFIX] Fix stripped OFFSET in rule evaluation and display +* [BUGFIX] Do not crash on failing Consul SD initialization +* [BUGFIX] Revert changes to metric auto-completion +* [BUGFIX] Add config overflow validation for TLS configuration +* [BUGFIX] Skip already watched Zookeeper nodes in serverset SD +* [BUGFIX] Don't federate stale samples +* [BUGFIX] Move NaN to end of result for `topk/bottomk/sort/sort_desc/min/max` +* [BUGFIX] Limit extrapolation of `delta/rate/increase` +* [BUGFIX] Fix unhandled error in rule evaluation + +Some changes to the Kubernetes service discovery were integrated since +it was released as a beta feature. + +## 0.16.1 / 2015-10-16 + +* [FEATURE] Add `irate()` function. +* [ENHANCEMENT] Improved auto-completion in expression browser. +* [CHANGE] Kubernetes SD moves node label to instance label. +* [BUGFIX] Escape regexes in console templates. + +## 0.16.0 / 2015-10-09 + +BREAKING CHANGES: + +* Release tarballs now contain the built binaries in a nested directory. 
+* The `hash_mod` relabeling action now uses MD5 hashes instead of FNV hashes to + achieve a better distribution. +* The DNS-SD meta label `__meta_dns_srv_name` was renamed to `__meta_dns_name` + to reflect support for DNS record types other than `SRV`. +* The default full refresh interval for the file-based service discovery has been + increased from 30 seconds to 5 minutes. +* In relabeling, parts of a source label that weren't matched by + the specified regular expression are no longer included in the replacement + output. +* Queries no longer interpolate between two data points. Instead, the resulting + value will always be the latest value before the evaluation query timestamp. +* Regular expressions supplied via the configuration are now anchored to match + full strings instead of substrings. +* Global labels are not appended upon storing time series anymore. Instead, + they are only appended when communicating with external systems + (Alertmanager, remote storages, federation). They have thus also been renamed + from `global.labels` to `global.external_labels`. +* The names and units of metrics related to remote storage sample appends have + been changed. +* The experimental support for writing to InfluxDB has been updated to work + with InfluxDB 0.9.x. 0.8.x versions of InfluxDB are not supported anymore. +* Escape sequences in double- and single-quoted string literals in rules or query + expressions are now interpreted like escape sequences in Go string literals + (https://golang.org/ref/spec#String_literals). + +Future breaking changes / deprecated features: + +* The `delta()` function had an undocumented optional second boolean argument + to make it behave like `increase()`. This second argument will be removed in + the future. Migrate any occurrences of `delta(x, 1)` to use `increase(x)` + instead. +* Support for filter operators between two scalar values (like `2 > 1`) will be + removed in the future. 
These will require a `bool` modifier on the operator, + e.g. `2 > bool 1`. + +All changes: + +* [CHANGE] Renamed `global.labels` to `global.external_labels`. +* [CHANGE] Vendoring is now done via govendor instead of godep. +* [CHANGE] Change web UI root page to show the graphing interface instead of + the server status page. +* [CHANGE] Append global labels only when communicating with external systems + instead of storing them locally. +* [CHANGE] Change all regexes in the configuration to do full-string matches + instead of substring matches. +* [CHANGE] Remove interpolation of vector values in queries. +* [CHANGE] For alert `SUMMARY`/`DESCRIPTION` template fields, cast the alert + value to `float64` to work with common templating functions. +* [CHANGE] In relabeling, don't include unmatched source label parts in the + replacement. +* [CHANGE] Change default full refresh interval for the file-based service + discovery from 30 seconds to 5 minutes. +* [CHANGE] Rename the DNS-SD meta label `__meta_dns_srv_name` to + `__meta_dns_name` to reflect support for other record types than `SRV`. +* [CHANGE] Release tarballs now contain the binaries in a nested directory. +* [CHANGE] Update InfluxDB write support to work with InfluxDB 0.9.x. +* [FEATURE] Support full "Go-style" escape sequences in strings and add raw + string literals. +* [FEATURE] Add EC2 service discovery support. +* [FEATURE] Allow configuring TLS options in scrape configurations. +* [FEATURE] Add instrumentation around configuration reloads. +* [FEATURE] Add `bool` modifier to comparison operators to enable boolean + (`0`/`1`) output instead of filtering. +* [FEATURE] In Zookeeper serverset discovery, provide `__meta_serverset_shard` + label with the serverset shard number. +* [FEATURE] Provide `__meta_consul_service_id` meta label in Consul service + discovery. +* [FEATURE] Allow scalar expressions in recording rules to enable use cases + such as building constant metrics. 
+* [FEATURE] Add `label_replace()` and `vector()` query language functions. +* [FEATURE] In Consul service discovery, fill in the `__meta_consul_dc` + datacenter label from the Consul agent when it's not set in the Consul SD + config. +* [FEATURE] Scrape all services upon empty services list in Consul service + discovery. +* [FEATURE] Add `labelmap` relabeling action to map a set of input labels to a + set of output labels using regular expressions. +* [FEATURE] Introduce `__tmp` as a relabeling label prefix that is guaranteed + to not be used by Prometheus internally. +* [FEATURE] Kubernetes-based service discovery. +* [FEATURE] Marathon-based service discovery. +* [FEATURE] Support multiple series names in console graphs JavaScript library. +* [FEATURE] Allow reloading configuration via web handler at `/-/reload`. +* [FEATURE] Updates to promtool to reflect new Prometheus configuration + features. +* [FEATURE] Add `proxy_url` parameter to scrape configurations to enable use of + proxy servers. +* [FEATURE] Add console templates for Prometheus itself. +* [FEATURE] Allow relabeling the protocol scheme of targets. +* [FEATURE] Add `predict_linear()` query language function. +* [FEATURE] Support for authentication using bearer tokens, client certs, and + CA certs. +* [FEATURE] Implement unary expressions for vector types (`-foo`, `+foo`). +* [FEATURE] Add console templates for the SNMP exporter. +* [FEATURE] Make it possible to relabel target scrape query parameters. +* [FEATURE] Add support for `A` and `AAAA` records in DNS service discovery. +* [ENHANCEMENT] Fix several flaky tests. +* [ENHANCEMENT] Switch to common routing package. +* [ENHANCEMENT] Use more resilient metric decoder. +* [ENHANCEMENT] Update vendored dependencies. +* [ENHANCEMENT] Add compression to more HTTP handlers. +* [ENHANCEMENT] Make -web.external-url flag help string more verbose. +* [ENHANCEMENT] Improve metrics around remote storage queues. 
+* [ENHANCEMENT] Use Go 1.5.1 instead of Go 1.4.2 in builds. +* [ENHANCEMENT] Update the architecture diagram in the `README.md`. +* [ENHANCEMENT] Time out sample appends in retrieval layer if the storage is + backlogging. +* [ENHANCEMENT] Make `hash_mod` relabeling action use MD5 instead of FNV to + enable better hash distribution. +* [ENHANCEMENT] Better tracking of targets between same service discovery + mechanisms in one scrape configuration. +* [ENHANCEMENT] Handle parser and query evaluation runtime panics more + gracefully. +* [ENHANCEMENT] Add IDs to H2 tags on status page to allow anchored linking. +* [BUGFIX] Fix watching multiple paths with Zookeeper serverset discovery. +* [BUGFIX] Fix high CPU usage on configuration reload. +* [BUGFIX] Fix disappearing `__params` on configuration reload. +* [BUGFIX] Make `labelmap` action available through configuration. +* [BUGFIX] Fix direct access of protobuf fields. +* [BUGFIX] Fix panic on Consul request error. +* [BUGFIX] Redirect of graph endpoint for prefixed setups. +* [BUGFIX] Fix series file deletion behavior when purging archived series. +* [BUGFIX] Fix error checking and logging around checkpointing. +* [BUGFIX] Fix map initialization in target manager. +* [BUGFIX] Fix draining of file watcher events in file-based service discovery. +* [BUGFIX] Add `POST` handler for `/debug` endpoints to fix CPU profiling. +* [BUGFIX] Fix several flaky tests. +* [BUGFIX] Fix busylooping in case a scrape configuration has no target + providers defined. +* [BUGFIX] Fix exit behavior of static target provider. +* [BUGFIX] Fix configuration reloading loop upon shutdown. +* [BUGFIX] Add missing check for nil expression in expression parser. +* [BUGFIX] Fix error handling bug in test code. +* [BUGFIX] Fix Consul port meta label. +* [BUGFIX] Fix lexer bug that treated non-Latin Unicode digits as digits. +* [CLEANUP] Remove obsolete federation example from console templates. 
+* [CLEANUP] Remove duplicated Bootstrap JS inclusion on graph page. +* [CLEANUP] Switch to common log package. +* [CLEANUP] Update build environment scripts and Makefiles to work better with + native Go build mechanisms and new Go 1.5 experimental vendoring support. +* [CLEANUP] Remove logged notice about 0.14.x configuration file format change. +* [CLEANUP] Move scrape-time metric label modification into SampleAppenders. +* [CLEANUP] Switch from `github.com/client_golang/model` to + `github.com/common/model` and related type cleanups. +* [CLEANUP] Switch from `github.com/client_golang/extraction` to + `github.com/common/expfmt` and related type cleanups. +* [CLEANUP] Exit Prometheus when the web server encounters a startup error. +* [CLEANUP] Remove non-functional alert-silencing links on alerting page. +* [CLEANUP] General cleanups to comments and code, derived from `golint`, + `go vet`, or otherwise. +* [CLEANUP] When entering crash recovery, tell users how to cleanly shut down + Prometheus. +* [CLEANUP] Remove internal support for multi-statement queries in query engine. +* [CLEANUP] Update AUTHORS.md. +* [CLEANUP] Don't warn/increment metric upon encountering equal timestamps for + the same series upon append. +* [CLEANUP] Resolve relative paths during configuration loading. + +## 0.15.1 / 2015-07-27 +* [BUGFIX] Fix vector matching behavior when there is a mix of equality and + non-equality matchers in a vector selector and one matcher matches no series. +* [ENHANCEMENT] Allow overriding `GOARCH` and `GOOS` in Makefile.INCLUDE. +* [ENHANCEMENT] Update vendored dependencies. + +## 0.15.0 / 2015-07-21 + +BREAKING CHANGES: + +* Relative paths for rule files are now evaluated relative to the config file. +* External reachability flags (`-web.*`) consolidated. +* The default storage directory has been changed from `/tmp/metrics` + to `data` in the local directory. 
+* The `rule_checker` tool has been replaced by `promtool` with + different flags and more functionality. +* Empty labels are now removed upon ingestion into the + storage. Matching empty labels is now equivalent to matching unset + labels (`mymetric{label=""}` now matches series that don't have + `label` set at all). +* The special `__meta_consul_tags` label in Consul service discovery + now starts and ends with tag separators to enable easier regex + matching. +* The default scrape interval has been changed back from 1 minute to + 10 seconds. + +All changes: + +* [CHANGE] Change default storage directory to `data` in the current + working directory. +* [CHANGE] Consolidate external reachability flags (`-web.*`) into one. +* [CHANGE] Deprecate `keeping_extra` modifier keyword, rename it to + `keep_common`. +* [CHANGE] Improve label matching performance and treat unset labels + like empty labels in label matchers. +* [CHANGE] Remove `rule_checker` tool and add generic `promtool` CLI + tool which allows checking rules and configuration files. +* [CHANGE] Resolve rule files relative to config file. +* [CHANGE] Restore default ScrapeInterval of 1 minute instead of 10 seconds. +* [CHANGE] Surround `__meta_consul_tags` value with tag separators. +* [CHANGE] Update node disk console for new filesystem labels. +* [FEATURE] Add Consul's `ServiceAddress`, `Address`, and `ServicePort` as + meta labels to enable setting a custom scrape address if needed. +* [FEATURE] Add `hashmod` relabel action to allow for horizontal + sharding of Prometheus servers. +* [FEATURE] Add `honor_labels` scrape configuration option to not + overwrite any labels exposed by the target. +* [FEATURE] Add basic federation support on `/federate`. +* [FEATURE] Add optional `RUNBOOK` field to alert statements. +* [FEATURE] Add pre-relabel target labels to status page. +* [FEATURE] Add version information endpoint under `/version`. 
+* [FEATURE] Added initial stable API version 1 under `/api/v1`, + including ability to delete series and query more metadata. +* [FEATURE] Allow configuring query parameters when scraping metrics endpoints. +* [FEATURE] Allow deleting time series via the new v1 API. +* [FEATURE] Allow individual ingested metrics to be relabeled. +* [FEATURE] Allow loading rule files from an entire directory. +* [FEATURE] Allow scalar expressions in range queries, improve error messages. +* [FEATURE] Support Zookeeper Serversets as a service discovery mechanism. +* [ENHANCEMENT] Add circleci yaml for Dockerfile test build. +* [ENHANCEMENT] Always show selected graph range, regardless of available data. +* [ENHANCEMENT] Change expression input field to multi-line textarea. +* [ENHANCEMENT] Enforce strict monotonicity of time stamps within a series. +* [ENHANCEMENT] Export build information as metric. +* [ENHANCEMENT] Improve UI of `/alerts` page. +* [ENHANCEMENT] Improve display of target labels on status page. +* [ENHANCEMENT] Improve initialization and routing functionality of web service. +* [ENHANCEMENT] Improve target URL handling and display. +* [ENHANCEMENT] New dockerfile using alpine-glibc base image and make. +* [ENHANCEMENT] Other minor fixes. +* [ENHANCEMENT] Preserve alert state across reloads. +* [ENHANCEMENT] Prettify flag help output even more. +* [ENHANCEMENT] README.md updates. +* [ENHANCEMENT] Raise error on unknown config parameters. +* [ENHANCEMENT] Refine v1 HTTP API output. +* [ENHANCEMENT] Show original configuration file contents on status + page instead of serialized YAML. +* [ENHANCEMENT] Start HUP signal handler earlier to not exit upon HUP + during startup. +* [ENHANCEMENT] Updated vendored dependencies. +* [BUGFIX] Do not panic in `StringToDuration()` on wrong duration unit. +* [BUGFIX] Exit on invalid rule files on startup. +* [BUGFIX] Fix a regression in the `.Path` console template variable. +* [BUGFIX] Fix chunk descriptor loading. 
+* [BUGFIX] Fix consoles "Prometheus" link to point to / +* [BUGFIX] Fix empty configuration file cases +* [BUGFIX] Fix float to int conversions in chunk encoding, which were + broken for some architectures. +* [BUGFIX] Fix overflow detection for serverset config. +* [BUGFIX] Fix race conditions in retrieval layer. +* [BUGFIX] Fix shutdown deadlock in Consul SD code. +* [BUGFIX] Fix the race condition targets in the Makefile. +* [BUGFIX] Fix value display error in web console. +* [BUGFIX] Hide authentication credentials in config `String()` output. +* [BUGFIX] Increment dirty counter metric in storage only if + `setDirty(true)` is called. +* [BUGFIX] Periodically refresh services in Consul to recover from + missing events. +* [BUGFIX] Prevent overwrite of default global config when loading a + configuration. +* [BUGFIX] Properly lex `\r` as whitespace in expression language. +* [BUGFIX] Validate label names in JSON target groups. +* [BUGFIX] Validate presence of regex field in relabeling configurations. +* [CLEANUP] Clean up initialization of remote storage queues. +* [CLEANUP] Fix `go vet` and `golint` violations. +* [CLEANUP] General cleanup of rules and query language code. +* [CLEANUP] Improve and simplify Dockerfile build steps. +* [CLEANUP] Improve and simplify build infrastructure, use go-bindata + for web assets. Allow building without git. +* [CLEANUP] Move all utility packages into common `util` subdirectory. +* [CLEANUP] Refactor main, flag handling, and web package. +* [CLEANUP] Remove unused methods from `Rule` interface. +* [CLEANUP] Simplify default config handling. +* [CLEANUP] Switch human-readable times on web UI to UTC. +* [CLEANUP] Use `templates.TemplateExpander` for all page templates. +* [CLEANUP] Use new v1 HTTP API for querying and graphing. + +## 0.14.0 / 2015-06-01 +* [CHANGE] Configuration format changed and switched to YAML. + (See the provided [migration tool](https://github.com/prometheus/migrate/releases).) 
+* [ENHANCEMENT] Redesign of state-preserving target discovery. +* [ENHANCEMENT] Allow specifying scrape URL scheme and basic HTTP auth for non-static targets. +* [FEATURE] Allow attaching meaningful labels to targets via relabeling. +* [FEATURE] Configuration/rule reloading at runtime. +* [FEATURE] Target discovery via file watches. +* [FEATURE] Target discovery via Consul. +* [ENHANCEMENT] Simplified binary operation evaluation. +* [ENHANCEMENT] More stable component initialization. +* [ENHANCEMENT] Added internal expression testing language. +* [BUGFIX] Fix graph links with path prefix. +* [ENHANCEMENT] Allow building from source without git. +* [ENHANCEMENT] Improve storage iterator performance. +* [ENHANCEMENT] Change logging output format and flags. +* [BUGFIX] Fix memory alignment bug for 32bit systems. +* [ENHANCEMENT] Improve web redirection behavior. +* [ENHANCEMENT] Allow overriding default hostname for Prometheus URLs. +* [BUGFIX] Fix double slash in URL sent to alertmanager. +* [FEATURE] Add resets() query function to count counter resets. +* [FEATURE] Add changes() query function to count the number of times a gauge changed. +* [FEATURE] Add increase() query function to calculate a counter's increase. +* [ENHANCEMENT] Limit retrievable samples to the storage's retention window. + +## 0.13.4 / 2015-05-23 +* [BUGFIX] Fix a race while checkpointing fingerprint mappings. + +## 0.13.3 / 2015-05-11 +* [BUGFIX] Handle fingerprint collisions properly. +* [CHANGE] Comments in rules file must start with `#`. (The undocumented `//` + and `/*...*/` comment styles are no longer supported.) +* [ENHANCEMENT] Switch to custom expression language parser and evaluation + engine, which generates better error messages, fixes some parsing edge-cases, + and enables other future enhancements (like the ones below). +* [ENHANCEMENT] Limit maximum number of concurrent queries. +* [ENHANCEMENT] Terminate running queries during shutdown. 
+ +## 0.13.2 / 2015-05-05 +* [MAINTENANCE] Updated vendored dependencies to their newest versions. +* [MAINTENANCE] Include rule_checker and console templates in release tarball. +* [BUGFIX] Sort NaN as the lowest value. +* [ENHANCEMENT] Add square root, stddev and stdvar functions. +* [BUGFIX] Use scrape_timeout for scrape timeout, not scrape_interval. +* [ENHANCEMENT] Improve chunk and chunkDesc loading, increase performance when + reading from disk. +* [BUGFIX] Show correct error on wrong DNS response. + +## 0.13.1 / 2015-04-09 +* [BUGFIX] Treat memory series with zero chunks correctly in series maintenance. +* [ENHANCEMENT] Improve readability of usage text even more. + +## 0.13.0 / 2015-04-08 +* [ENHANCEMENT] Double-delta encoding for chunks, saving typically 40% of + space, both in RAM and on disk. +* [ENHANCEMENT] Redesign of chunk persistence queuing, increasing performance + on spinning disks significantly. +* [ENHANCEMENT] Redesign of sample ingestion, increasing ingestion performance. +* [FEATURE] Added ln, log2, log10 and exp functions to the query language. +* [FEATURE] Experimental write support to InfluxDB. +* [FEATURE] Allow custom timestamps in instant query API. +* [FEATURE] Configurable path prefix for URLs to support proxies. +* [ENHANCEMENT] Increase of rule_checker CLI usability. +* [CHANGE] Show special float values as gaps. +* [ENHANCEMENT] Made usage output more readable. +* [ENHANCEMENT] Increased resilience of the storage against data corruption. +* [ENHANCEMENT] Various improvements around chunk encoding. +* [ENHANCEMENT] Nicer formatting of target health table on /status. +* [CHANGE] Rename UNREACHABLE to UNHEALTHY, ALIVE to HEALTHY. +* [BUGFIX] Strip trailing slash in alertmanager URL. +* [BUGFIX] Avoid +InfYs and similar, just display +Inf. +* [BUGFIX] Fixed HTML-escaping at various places. +* [BUGFIX] Fixed special value handling in division and modulo of the query + language. +* [BUGFIX] Fix embed-static.sh. 
+* [CLEANUP] Added initial HTTP API tests. +* [CLEANUP] Misc. other code cleanups. +* [MAINTENANCE] Updated vendored dependencies to their newest versions. + +## 0.12.0 / 2015-03-04 +* [CHANGE] Use client_golang v0.3.1. THIS CHANGES FINGERPRINTING AND INVALIDATES + ALL PERSISTED FINGERPRINTS. You have to wipe your storage to use this or + later versions. There is a version guard in place that will prevent you from + running Prometheus with the stored data of an older Prometheus. +* [BUGFIX] The change above fixes a weakness in the fingerprinting algorithm. +* [ENHANCEMENT] The change above makes fingerprinting faster and less allocation + intensive. +* [FEATURE] OR operator and vector matching options. See docs for details. +* [ENHANCEMENT] Scientific notation and special float values (Inf, NaN) now + supported by the expression language. +* [CHANGE] Dockerfile makes Prometheus use the Docker volume to store data + (rather than /tmp/metrics). +* [CHANGE] Makefile uses Go 1.4.2. + +## 0.11.1 / 2015-02-27 +* [BUGFIX] Make series maintenance complete again. (Ever since 0.9.0rc4, + or commit 0851945, series would not be archived, chunk descriptors would + not be evicted, and stale head chunks would never be closed. This happened + due to accidental deletion of a line calling a (well tested :) function. +* [BUGFIX] Do not double count head chunks read from checkpoint on startup. + Also fix a related but less severe bug in counting chunk descriptors. +* [BUGFIX] Check last time in head chunk for head chunk timeout, not first. +* [CHANGE] Update vendoring due to vendoring changes in client_golang. +* [CLEANUP] Code cleanups. +* [ENHANCEMENT] Limit the number of 'dirty' series counted during checkpointing. + +## 0.11.0 / 2015-02-23 +* [FEATURE] Introduce new metric type Histogram with server-side aggregation. +* [FEATURE] Add offset operator. +* [FEATURE] Add floor, ceil and round functions. +* [CHANGE] Change instance identifiers to be host:port. 
+* [CHANGE] Dependency management and vendoring changed/improved. +* [CHANGE] Flag name changes to create consistency between various Prometheus + binaries. +* [CHANGE] Show unlimited number of metrics in autocomplete. +* [CHANGE] Add query timeout. +* [CHANGE] Remove labels on persist error counter. +* [ENHANCEMENT] Various performance improvements for sample ingestion. +* [ENHANCEMENT] Various Makefile improvements. +* [ENHANCEMENT] Various console template improvements, including + proof-of-concept for federation via console templates. +* [ENHANCEMENT] Fix graph JS glitches and simplify graphing code. +* [ENHANCEMENT] Dramatically decrease resources for file embedding. +* [ENHANCEMENT] Crash recovery saves lost series data in 'orphaned' directory. +* [BUGFIX] Fix aggregation grouping key calculation. +* [BUGFIX] Fix Go download path for various architectures. +* [BUGFIX] Fixed the link of the Travis build status image. +* [BUGFIX] Fix Rickshaw/D3 version mismatch. +* [CLEANUP] Various code cleanups. + +## 0.10.0 / 2015-01-26 +* [CHANGE] More efficient JSON result format in query API. This requires + up-to-date versions of PromDash and prometheus_cli, too. +* [ENHANCEMENT] Excluded non-minified Bootstrap assets and the Bootstrap maps + from embedding into the binary. Those files are only used for debugging, + and then you can use -web.use-local-assets. By including fewer files, the + RAM usage during compilation is much more manageable. +* [ENHANCEMENT] Help link points to http://prometheus.github.io now. +* [FEATURE] Consoles for haproxy and cloudwatch. +* [BUGFIX] Several fixes to graphs in consoles. +* [CLEANUP] Removed a file size check that did not check anything. + +## 0.9.0 / 2015-01-23 +* [CHANGE] Reworked command line flags, now more consistent and taking into + account needs of the new storage backend (see below). +* [CHANGE] Metric names are dropped after certain transformations. 
+* [CHANGE] Changed partitioning of summary metrics exported by Prometheus. +* [CHANGE] Got rid of Gerrit as a review tool. +* [CHANGE] 'Tabular' view now the default (rather than 'Graph') to avoid + running very expensive queries accidentally. +* [CHANGE] On-disk format for stored samples changed. For upgrading, you have + to nuke your old files completely. See "Complete rewrite of the storage + layer" below. +* [CHANGE] Removed 2nd argument from `delta`. +* [FEATURE] Added a `deriv` function. +* [FEATURE] Console templates. +* [FEATURE] Added `absent` function. +* [FEATURE] Allow omitting the metric name in queries. +* [BUGFIX] Removed all known race conditions. +* [BUGFIX] Metric mutations now handled correctly in all cases. +* [ENHANCEMENT] Proper double-start protection. +* [ENHANCEMENT] Complete rewrite of the storage layer. Benefits include: + * Better query performance. + * More samples in less RAM. + * Better memory management. + * Scales up to millions of time series and thousands of samples ingested + per second. + * Purging of obsolete samples much cleaner now, up to completely + "forgetting" obsolete time series. + * Proper instrumentation to diagnose the storage layer with... well... + Prometheus. + * Pure Go implementation, no need for cgo and shared C libraries anymore. + * Better concurrency. +* [ENHANCEMENT] Copy-on-write semantics in the AST layer. +* [ENHANCEMENT] Switched from Go 1.3 to Go 1.4. +* [ENHANCEMENT] Vendored external dependencies with godeps. +* [ENHANCEMENT] Numerous Web UI improvements, moved to Bootstrap3 and + Rickshaw 1.5.1. +* [ENHANCEMENT] Improved Docker integration. +* [ENHANCEMENT] Simplified the Makefile contraption. +* [CLEANUP] Put meta-data files into proper shape (LICENSE, README.md etc.) +* [CLEANUP] Removed all legitimate 'go vet' and 'golint' warnings. +* [CLEANUP] Removed dead code. + +## 0.8.0 / 2014-09-04 +* [ENHANCEMENT] Stagger scrapes to spread out load. +* [BUGFIX] Correctly quote HTTP Accept header. 
+ +## 0.7.0 / 2014-08-06 +* [FEATURE] Added new functions: abs(), topk(), bottomk(), drop_common_labels(). +* [FEATURE] Let console templates get graph links from expressions. +* [FEATURE] Allow console templates to dynamically include other templates. +* [FEATURE] Template consoles now have access to their URL. +* [BUGFIX] Fixed time() function to return evaluation time, not wallclock time. +* [BUGFIX] Fixed HTTP connection leak when targets returned a non-200 status. +* [BUGFIX] Fixed link to console templates in UI. +* [PERFORMANCE] Removed extra memory copies while scraping targets. +* [ENHANCEMENT] Switched from Go 1.2.1 to Go 1.3. +* [ENHANCEMENT] Made metrics exported by Prometheus itself more consistent. +* [ENHANCEMENT] Removed incremental backoffs for unhealthy targets. +* [ENHANCEMENT] Dockerfile also builds Prometheus support tools now. + +## 0.6.0 / 2014-06-30 +* [FEATURE] Added console and alert templates support, along with various template functions. +* [PERFORMANCE] Much faster and more memory-efficient flushing to disk. +* [ENHANCEMENT] Query results are now only logged when debugging. +* [ENHANCEMENT] Upgraded to new Prometheus client library for exposing metrics. +* [BUGFIX] Samples are now kept in memory until fully flushed to disk. +* [BUGFIX] Non-200 target scrapes are now treated as an error. +* [BUGFIX] Added installation step for missing dependency to Dockerfile. +* [BUGFIX] Removed broken and unused "User Dashboard" link. + +## 0.5.0 / 2014-05-28 + +* [BUGFIX] Fixed next retrieval time display on status page. +* [BUGFIX] Updated some variable references in tools subdir. +* [FEATURE] Added support for scraping metrics via the new text format. +* [PERFORMANCE] Improved label matcher performance. +* [PERFORMANCE] Removed JSON indentation in query API, leading to smaller response sizes. +* [ENHANCEMENT] Added internal check to verify temporal order of streams. +* [ENHANCEMENT] Some internal refactorings. 
+ +## 0.4.0 / 2014-04-17 + +* [FEATURE] Vectors and scalars may now be reversed in binary operations (`<scalar> <binop> <vector>`). +* [FEATURE] It's possible to shutdown Prometheus via a `/-/quit` web endpoint now. +* [BUGFIX] Fix for a deadlock race condition in the memory storage. +* [BUGFIX] Mac OS X build fixed. +* [BUGFIX] Built from Go 1.2.1, which has internal fixes to race conditions in garbage collection handling. +* [ENHANCEMENT] Internal storage interface refactoring that allows building e.g. the `rule_checker` tool without LevelDB dynamic library dependencies. +* [ENHANCEMENT] Cleanups around shutdown handling. +* [PERFORMANCE] Preparations for better memory reuse during marshalling / unmarshalling. diff --git a/src/prometheus/CONTRIBUTING.md b/src/prometheus/CONTRIBUTING.md new file mode 100644 index 0000000..4eceade --- /dev/null +++ b/src/prometheus/CONTRIBUTING.md @@ -0,0 +1,53 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) a suitable maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. 
+ +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). + +* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) + + +## Steps to Contribute + +Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. + +Please check the [`low-hanging-fruit`](https://github.com/prometheus/prometheus/issues?q=is%3Aissue+is%3Aopen+label%3A%22low+hanging+fruit%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). + +For complete instructions on how to compile see: [Building From Source](https://github.com/prometheus/prometheus#building-from-source) + +For quickly compiling and testing your changes do: +``` +# For building. +go build ./cmd/prometheus/ +./prometheus + +# For testing. +make test # Make sure all the tests pass before you commit and push :) +``` + +All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labelling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions). + +## Pull Request Checklist + +* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. 
+ +* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). + +* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). + +* Add tests relevant to the fixed bug or new feature. diff --git a/src/prometheus/Dockerfile b/src/prometheus/Dockerfile new file mode 100644 index 0000000..0536989 --- /dev/null +++ b/src/prometheus/Dockerfile @@ -0,0 +1,23 @@ +FROM arm64v8/busybox:latest +LABEL maintainer "The Prometheus Authors <prometheus-developers@googlegroups.com>" + +COPY prometheus /bin/prometheus +COPY promtool /bin/promtool +COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml +COPY console_libraries/ /usr/share/prometheus/console_libraries/ +COPY consoles/ /usr/share/prometheus/consoles/ + +RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ +RUN mkdir -p /prometheus && \ + chown -R nobody:nogroup /etc/prometheus /prometheus && \ + chmod +x /bin/prometheus /bin/promtool + +USER nobody +EXPOSE 9090 +VOLUME [ "/prometheus" ] +WORKDIR /prometheus +ENTRYPOINT [ "/bin/prometheus" ] +CMD [ "--config.file=/etc/prometheus/prometheus.yml", \ + "--storage.tsdb.path=/prometheus", \ + "--web.console.libraries=/usr/share/prometheus/console_libraries", \ + "--web.console.templates=/usr/share/prometheus/consoles" ] diff --git a/src/prometheus/LICENSE b/src/prometheus/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/src/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/prometheus/MAINTAINERS.md b/src/prometheus/MAINTAINERS.md new file mode 100644 index 0000000..48330b1 --- /dev/null +++ b/src/prometheus/MAINTAINERS.md @@ -0,0 +1,6 @@ +Maintainers of this repository with their focus areas: + +* Brian Brazil @brian-brazil: Console templates; semantics of PromQL, service discovery, and relabeling. +* Fabian Reinartz @fabxc: PromQL parsing and evaluation; implementation of retrieval, alert notification, and service discovery. +* Julius Volz @juliusv: Remote storage integrations; web UI. 
+ diff --git a/src/prometheus/Makefile b/src/prometheus/Makefile new file mode 100644 index 0000000..c243f60 --- /dev/null +++ b/src/prometheus/Makefile @@ -0,0 +1,34 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +include Makefile.common + +STATICCHECK_IGNORE = \ + github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go:SA1019 \ + github.com/prometheus/prometheus/discovery/kubernetes/node.go:SA1019 \ + github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/main.go:SA1019 \ + github.com/prometheus/prometheus/pkg/textparse/lex.l.go:SA4006 \ + github.com/prometheus/prometheus/pkg/pool/pool.go:SA6002 \ + github.com/prometheus/prometheus/promql/engine.go:SA6002 + +DOCKER_IMAGE_NAME ?= prometheus + +ifdef DEBUG + bindata_flags = -debug +endif + +assets: + @echo ">> writing assets" + @$(GO) get -u github.com/jteeuwen/go-bindata/... + @go-bindata $(bindata_flags) -pkg ui -o web/ui/bindata.go -ignore '(.*\.map|bootstrap\.js|bootstrap-theme\.css|bootstrap\.css)' web/ui/templates/... web/ui/static/... 
+ @$(GO) fmt ./web/ui \ No newline at end of file diff --git a/src/prometheus/Makefile.common b/src/prometheus/Makefile.common new file mode 100644 index 0000000..353494c --- /dev/null +++ b/src/prometheus/Makefile.common @@ -0,0 +1,100 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +PROMU := $(FIRST_GOPATH)/bin/promu +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +GOVENDOR := $(FIRST_GOPATH)/bin/govendor +pkgs = ./... + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) + +all: style staticcheck unused build test + +style: + @echo ">> checking code style" + ! $(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' + +check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +test-short: + @echo ">> running short tests" + $(GO) test -short $(pkgs) + +test: + @echo ">> running all tests" + $(GO) test -race $(pkgs) + +format: + @echo ">> formatting code" + $(GO) fmt $(pkgs) + +vet: + @echo ">> vetting code" + $(GO) vet $(pkgs) + +staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" + $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + +unused: $(GOVENDOR) + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' + +build: promu + @echo ">> building binaries" + $(PROMU) build --prefix $(PREFIX) + +tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +docker: + docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . + +promu: + GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu + +$(FIRST_GOPATH)/bin/staticcheck: + GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck + +$(FIRST_GOPATH)/bin/govendor: + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor + +.PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck govendor $(FIRST_GOPATH)/bin/govendor \ No newline at end of file diff --git a/src/prometheus/NOTICE b/src/prometheus/NOTICE new file mode 100644 index 0000000..2e14135 --- /dev/null +++ b/src/prometheus/NOTICE @@ -0,0 +1,87 @@ +The Prometheus systems and service monitoring server +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +Bootstrap +http://getbootstrap.com +Copyright 2011-2014 Twitter, Inc. 
+Licensed under the MIT License + +bootstrap3-typeahead.js +https://github.com/bassjobsen/Bootstrap-3-Typeahead +Original written by @mdo and @fat +Copyright 2014 Bass Jobsen @bassjobsen +Licensed under the Apache License, Version 2.0 + +fuzzy +https://github.com/mattyork/fuzzy +Original written by @mattyork +Copyright 2012 Matt York +Licensed under the MIT License + +bootstrap-datetimepicker.js +https://github.com/Eonasdan/bootstrap-datetimepicker +Copyright 2015 Jonathan Peterson (@Eonasdan) +Licensed under the MIT License + +moment.js +https://github.com/moment/moment/ +Copyright JS Foundation and other contributors +Licensed under the MIT License + +Rickshaw +https://github.com/shutterstock/rickshaw +Copyright 2011-2014 by Shutterstock Images, LLC +See https://github.com/shutterstock/rickshaw/blob/master/LICENSE for license details + +mustache.js +https://github.com/janl/mustache.js +Copyright 2009 Chris Wanstrath (Ruby) +Copyright 2010-2014 Jan Lehnardt (JavaScript) +Copyright 2010-2015 The mustache.js community +Licensed under the MIT License + +jQuery +https://jquery.org +Copyright jQuery Foundation and other contributors +Licensed under the MIT License + +Protocol Buffers for Go with Gadgets +http://github.com/gogo/protobuf/ +Copyright (c) 2013, The GoGo Authors. +See source code for license details. + +Go support for leveled logs, analogous to +https://code.google.com/p/google-glog/ +Copyright 2013 Google Inc. +Licensed under the Apache License, Version 2.0 + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 + +DNS library in Go +http://miek.nl/posts/2014/Aug/16/go-dns-package/ +Copyright 2009 The Go Authors, 2011 Miek Gieben +See https://github.com/miekg/dns/blob/master/LICENSE for license details. 
+ +LevelDB key/value database in Go +https://github.com/syndtr/goleveldb +Copyright 2012 Suryandaru Triandana +See https://github.com/syndtr/goleveldb/blob/master/LICENSE for license details. + +gosnappy - a fork of code.google.com/p/snappy-go +https://github.com/syndtr/gosnappy +Copyright 2011 The Snappy-Go Authors +See https://github.com/syndtr/gosnappy/blob/master/LICENSE for license details. + +go-zookeeper - Native ZooKeeper client for Go +https://github.com/samuel/go-zookeeper +Copyright (c) 2013, Samuel Stauffer +See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details. diff --git a/src/prometheus/README.md b/src/prometheus/README.md new file mode 100644 index 0000000..90e1c78 --- /dev/null +++ b/src/prometheus/README.md @@ -0,0 +1,105 @@ +# Prometheus [![Build Status](https://travis-ci.org/prometheus/prometheus.svg)][travis] + +[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/master.svg?style=shield)][circleci] +[![Docker Repository on Quay](https://quay.io/repository/prometheus/prometheus/status)][quay] +[![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub] +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486) + +Visit [prometheus.io](https://prometheus.io) for the full documentation, +examples and guides. + +Prometheus, a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics +from configured targets at given intervals, evaluates rule expressions, +displays the results, and can trigger alerts if some condition is observed +to be true. 
+ +Prometheus' main distinguishing features as compared to other monitoring systems are: + +- a **multi-dimensional** data model (timeseries defined by metric name and set of key/value dimensions) +- a **flexible query language** to leverage this dimensionality +- no dependency on distributed storage; **single server nodes are autonomous** +- timeseries collection happens via a **pull model** over HTTP +- **pushing timeseries** is supported via an intermediary gateway +- targets are discovered via **service discovery** or **static configuration** +- multiple modes of **graphing and dashboarding support** +- support for hierarchical and horizontal **federation** + +## Architecture overview + +![](https://cdn.rawgit.com/prometheus/prometheus/c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg) + +## Install + +There are various ways of installing Prometheus. + +### Precompiled binaries + +Precompiled binaries for released versions are available in the +[*download* section](https://prometheus.io/download/) +on [prometheus.io](https://prometheus.io). Using the latest production release binary +is the recommended way of installing Prometheus. +See the [Installing](https://prometheus.io/docs/introduction/install/) +chapter in the documentation for all the details. + +Debian packages [are available](https://packages.debian.org/sid/net/prometheus). + +### Docker images + +Docker images are available on [Quay.io](https://quay.io/repository/prometheus/prometheus). + +You can launch a Prometheus container for trying it out with + + $ docker run --name prometheus -d -p 127.0.0.1:9090:9090 quay.io/prometheus/prometheus + +Prometheus will now be reachable at http://localhost:9090/. + +### Building from source + +To build Prometheus from the source code yourself you need to have a working +Go environment with [version 1.10 or greater installed](http://golang.org/doc/install). 
+ +You can directly use the `go` tool to download and install the `prometheus` +and `promtool` binaries into your `GOPATH`: + + $ go get github.com/prometheus/prometheus/cmd/... + $ prometheus --config.file=your_config.yml + +You can also clone the repository yourself and build using `make`: + + $ mkdir -p $GOPATH/src/github.com/prometheus + $ cd $GOPATH/src/github.com/prometheus + $ git clone https://github.com/prometheus/prometheus.git + $ cd prometheus + $ make build + $ ./prometheus --config.file=your_config.yml + +The Makefile provides several targets: + + * *build*: build the `prometheus` and `promtool` binaries + * *test*: run the tests + * *test-short*: run the short tests + * *format*: format the source code + * *vet*: check the source code for common errors + * *assets*: rebuild the static assets + * *docker*: build a docker container for the current `HEAD` + +## More information + + * The source code is periodically indexed: [Prometheus Core](http://godoc.org/github.com/prometheus/prometheus). + * You will find a Travis CI configuration in `.travis.yml`. + * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels. + +## Contributing + +Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/master/CONTRIBUTING.md) + +## License + +Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/master/LICENSE). 
+ + +[travis]: https://travis-ci.org/prometheus/prometheus +[hub]: https://hub.docker.com/r/prom/prometheus/ +[circleci]: https://circleci.com/gh/prometheus/prometheus +[quay]: https://quay.io/repository/prometheus/prometheus diff --git a/src/prometheus/VERSION b/src/prometheus/VERSION new file mode 100644 index 0000000..2bf1c1c --- /dev/null +++ b/src/prometheus/VERSION @@ -0,0 +1 @@ +2.3.1 diff --git a/src/prometheus/cmd/prometheus/fdlimits_default.go b/src/prometheus/cmd/prometheus/fdlimits_default.go new file mode 100644 index 0000000..197810e --- /dev/null +++ b/src/prometheus/cmd/prometheus/fdlimits_default.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows + +package main + +import ( + "fmt" + "log" + "syscall" +) + +// FdLimits returns the soft and hard limits for file descriptors +func FdLimits() string { + flimit := syscall.Rlimit{} + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &flimit) + if err != nil { + log.Fatal("Error!") + } + return fmt.Sprintf("(soft=%d, hard=%d)", flimit.Cur, flimit.Max) +} diff --git a/src/prometheus/cmd/prometheus/fdlimits_windows.go b/src/prometheus/cmd/prometheus/fdlimits_windows.go new file mode 100644 index 0000000..3fcff49 --- /dev/null +++ b/src/prometheus/cmd/prometheus/fdlimits_windows.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package main + +// FdLimits not supported on Windows +func FdLimits() string { + return "N/A" +} diff --git a/src/prometheus/cmd/prometheus/main.go b/src/prometheus/cmd/prometheus/main.go new file mode 100644 index 0000000..1188a7b --- /dev/null +++ b/src/prometheus/cmd/prometheus/main.go @@ -0,0 +1,698 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The main package for the Prometheus server executable. +package main + +import ( + "context" + "crypto/md5" + "encoding/json" + "fmt" + "net" + "net/http" + _ "net/http/pprof" // Comment this line to disable pprof endpoint. + "net/url" + "os" + "os/signal" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/oklog/pkg/group" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/version" + "gopkg.in/alecthomas/kingpin.v2" + k8s_runtime "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/mwitkow/go-conntrack" + "github.com/prometheus/common/promlog" + promlogflag "github.com/prometheus/common/promlog/flag" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + sd_config "github.com/prometheus/prometheus/discovery/config" + "github.com/prometheus/prometheus/notifier" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/rules" + "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/storage/tsdb" + "github.com/prometheus/prometheus/util/strutil" + "github.com/prometheus/prometheus/web" +) + +var ( + configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_config_last_reload_successful", + Help: "Whether the last configuration reload attempt was 
successful.", + }) + configSuccessTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_config_last_reload_success_timestamp_seconds", + Help: "Timestamp of the last successful configuration reload.", + }) +) + +func init() { + prometheus.MustRegister(version.NewCollector("prometheus")) +} + +func main() { + if os.Getenv("DEBUG") != "" { + runtime.SetBlockProfileRate(20) + runtime.SetMutexProfileFraction(20) + } + + cfg := struct { + configFile string + + localStoragePath string + notifier notifier.Options + notifierTimeout model.Duration + web web.Options + tsdb tsdb.Options + lookbackDelta model.Duration + webTimeout model.Duration + queryTimeout model.Duration + queryConcurrency int + RemoteFlushDeadline model.Duration + + prometheusURL string + + logLevel promlog.AllowedLevel + }{ + notifier: notifier.Options{ + Registerer: prometheus.DefaultRegisterer, + }, + } + + a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server") + + a.Version(version.Print("prometheus")) + + a.HelpFlag.Short('h') + + a.Flag("config.file", "Prometheus configuration file path."). + Default("prometheus.yml").StringVar(&cfg.configFile) + + a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry."). + Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress) + + a.Flag("web.read-timeout", + "Maximum duration before timing out read of the request, and closing idle connections."). + Default("5m").SetValue(&cfg.webTimeout) + + a.Flag("web.max-connections", "Maximum number of simultaneous connections."). + Default("512").IntVar(&cfg.web.MaxConnections) + + a.Flag("web.external-url", + "The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. 
If omitted, relevant URL components will be derived automatically."). + PlaceHolder("").StringVar(&cfg.prometheusURL) + + a.Flag("web.route-prefix", + "Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url."). + PlaceHolder("").StringVar(&cfg.web.RoutePrefix) + + a.Flag("web.user-assets", "Path to static asset directory, available at /user."). + PlaceHolder("").StringVar(&cfg.web.UserAssetsPath) + + a.Flag("web.enable-lifecycle", "Enable shutdown and reload via HTTP request."). + Default("false").BoolVar(&cfg.web.EnableLifecycle) + + a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions."). + Default("false").BoolVar(&cfg.web.EnableAdminAPI) + + a.Flag("web.console.templates", "Path to the console template directory, available at /consoles."). + Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath) + + a.Flag("web.console.libraries", "Path to the console library directory."). + Default("console_libraries").StringVar(&cfg.web.ConsoleLibrariesPath) + + a.Flag("storage.tsdb.path", "Base path for metrics storage."). + Default("data/").StringVar(&cfg.localStoragePath) + + a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing."). + Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration) + + a.Flag("storage.tsdb.max-block-duration", + "Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period)."). + Hidden().PlaceHolder("").SetValue(&cfg.tsdb.MaxBlockDuration) + + a.Flag("storage.tsdb.retention", "How long to retain samples in storage."). + Default("15d").SetValue(&cfg.tsdb.Retention) + + a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). + Default("false").BoolVar(&cfg.tsdb.NoLockfile) + + a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload."). 
+ Default("1m").PlaceHolder("").SetValue(&cfg.RemoteFlushDeadline) + + a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications."). + Default("10000").IntVar(&cfg.notifier.QueueCapacity) + + a.Flag("alertmanager.timeout", "Timeout for sending alerts to Alertmanager."). + Default("10s").SetValue(&cfg.notifierTimeout) + + a.Flag("query.lookback-delta", "The delta difference allowed for retrieving metrics during expression evaluations."). + Default("5m").SetValue(&cfg.lookbackDelta) + + a.Flag("query.timeout", "Maximum time a query may take before being aborted."). + Default("2m").SetValue(&cfg.queryTimeout) + + a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently."). + Default("20").IntVar(&cfg.queryConcurrency) + + promlogflag.AddFlags(a, &cfg.logLevel) + + _, err := a.Parse(os.Args[1:]) + if err != nil { + fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Error parsing commandline arguments")) + a.Usage(os.Args[1:]) + os.Exit(2) + } + + cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress) + if err != nil { + fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL)) + os.Exit(2) + } + + cfg.web.ReadTimeout = time.Duration(cfg.webTimeout) + // Default -web.route-prefix to path of -web.external-url. + if cfg.web.RoutePrefix == "" { + cfg.web.RoutePrefix = cfg.web.ExternalURL.Path + } + // RoutePrefix must always be at least '/'. + cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") + + if cfg.tsdb.MaxBlockDuration == 0 { + cfg.tsdb.MaxBlockDuration = cfg.tsdb.Retention / 10 + } + + promql.LookbackDelta = time.Duration(cfg.lookbackDelta) + + logger := promlog.New(cfg.logLevel) + + // XXX(fabxc): Kubernetes does background logging which we can only customize by modifying + // a global variable. + // Ultimately, here is the best place to set it. 
+ k8s_runtime.ErrorHandlers = []func(error){ + func(err error) { + level.Error(log.With(logger, "component", "k8s_client_runtime")).Log("err", err) + }, + } + + level.Info(logger).Log("msg", "Starting Prometheus", "version", version.Info()) + level.Info(logger).Log("build_context", version.BuildContext()) + level.Info(logger).Log("host_details", Uname()) + level.Info(logger).Log("fd_limits", FdLimits()) + + var ( + localStorage = &tsdb.ReadyStorage{} + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), localStorage.StartTime, time.Duration(cfg.RemoteFlushDeadline)) + fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) + ) + + var ( + ctxWeb, cancelWeb = context.WithCancel(context.Background()) + ctxRule = context.Background() + + notifier = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) + + ctxScrape, cancelScrape = context.WithCancel(context.Background()) + discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape")) + + ctxNotify, cancelNotify = context.WithCancel(context.Background()) + discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify")) + + scrapeManager = scrape.NewManager(log.With(logger, "component", "scrape manager"), fanoutStorage) + + queryEngine = promql.NewEngine( + log.With(logger, "component", "query engine"), + prometheus.DefaultRegisterer, + cfg.queryConcurrency, + time.Duration(cfg.queryTimeout), + ) + + ruleManager = rules.NewManager(&rules.ManagerOptions{ + Appendable: fanoutStorage, + QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage), + NotifyFunc: sendAlerts(notifier, cfg.web.ExternalURL.String()), + Context: ctxRule, + ExternalURL: cfg.web.ExternalURL, + Registerer: prometheus.DefaultRegisterer, + Logger: log.With(logger, "component", "rule manager"), + }) + ) + + cfg.web.Context = ctxWeb + cfg.web.TSDB = localStorage.Get + cfg.web.Storage = 
fanoutStorage + cfg.web.QueryEngine = queryEngine + cfg.web.ScrapeManager = scrapeManager + cfg.web.RuleManager = ruleManager + cfg.web.Notifier = notifier + + cfg.web.Version = &web.PrometheusVersion{ + Version: version.Version, + Revision: version.Revision, + Branch: version.Branch, + BuildUser: version.BuildUser, + BuildDate: version.BuildDate, + GoVersion: version.GoVersion, + } + + cfg.web.Flags = map[string]string{} + + // Exclude kingpin default flags to expose only Prometheus ones. + boilerplateFlags := kingpin.New("", "").Version("") + for _, f := range a.Model().Flags { + if boilerplateFlags.GetFlag(f.Name) != nil { + continue + } + + cfg.web.Flags[f.Name] = f.Value.String() + } + + // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager + webHandler := web.New(log.With(logger, "component", "web"), &cfg.web) + + // Monitor outgoing connections on default transport with conntrack. + http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc( + conntrack.DialWithTracing(), + ) + + reloaders := []func(cfg *config.Config) error{ + remoteStorage.ApplyConfig, + webHandler.ApplyConfig, + // The Scrape and notifier managers need to reload before the Discovery manager as + // they need to read the most updated config when receiving the new targets list. + notifier.ApplyConfig, + scrapeManager.ApplyConfig, + func(cfg *config.Config) error { + c := make(map[string]sd_config.ServiceDiscoveryConfig) + for _, v := range cfg.ScrapeConfigs { + c[v.JobName] = v.ServiceDiscoveryConfig + } + return discoveryManagerScrape.ApplyConfig(c) + }, + func(cfg *config.Config) error { + c := make(map[string]sd_config.ServiceDiscoveryConfig) + for _, v := range cfg.AlertingConfig.AlertmanagerConfigs { + // AlertmanagerConfigs doesn't hold a unique identifier so we use the config hash as the identifier. 
+ b, err := json.Marshal(v) + if err != nil { + return err + } + c[fmt.Sprintf("%x", md5.Sum(b))] = v.ServiceDiscoveryConfig + } + return discoveryManagerNotify.ApplyConfig(c) + }, + func(cfg *config.Config) error { + // Get all rule files matching the configuration paths. + var files []string + for _, pat := range cfg.RuleFiles { + fs, err := filepath.Glob(pat) + if err != nil { + // The only error can be a bad pattern. + return fmt.Errorf("error retrieving rule files for %s: %s", pat, err) + } + files = append(files, fs...) + } + return ruleManager.Update(time.Duration(cfg.GlobalConfig.EvaluationInterval), files) + }, + } + + prometheus.MustRegister(configSuccess) + prometheus.MustRegister(configSuccessTime) + + // Start all components while we wait for TSDB to open but only load + // initial config and mark ourselves as ready after it completed. + dbOpen := make(chan struct{}) + + // sync.Once is used to make sure we can close the channel at different execution stages(SIGTERM or when the config is loaded). + type closeOnce struct { + C chan struct{} + once sync.Once + Close func() + } + // Wait until the server is ready to handle reloading. + reloadReady := &closeOnce{ + C: make(chan struct{}), + } + reloadReady.Close = func() { + reloadReady.once.Do(func() { + close(reloadReady.C) + }) + } + + var g group.Group + { + // Termination handler. + term := make(chan os.Signal) + signal.Notify(term, os.Interrupt, syscall.SIGTERM) + cancel := make(chan struct{}) + g.Add( + func() error { + // Don't forget to release the reloadReady channel so that waiting blocks can exit normally. 
+ select { + case <-term: + level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...") + reloadReady.Close() + + case <-webHandler.Quit(): + level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...") + case <-cancel: + reloadReady.Close() + break + } + return nil + }, + func(err error) { + close(cancel) + }, + ) + } + { + // Scrape discovery manager. + g.Add( + func() error { + err := discoveryManagerScrape.Run() + level.Info(logger).Log("msg", "Scrape discovery manager stopped") + return err + }, + func(err error) { + level.Info(logger).Log("msg", "Stopping scrape discovery manager...") + cancelScrape() + }, + ) + } + { + // Notify discovery manager. + g.Add( + func() error { + err := discoveryManagerNotify.Run() + level.Info(logger).Log("msg", "Notify discovery manager stopped") + return err + }, + func(err error) { + level.Info(logger).Log("msg", "Stopping notify discovery manager...") + cancelNotify() + }, + ) + } + { + // Scrape manager. + g.Add( + func() error { + // When the scrape manager receives a new targets list + // it needs to read a valid config for each job. + // It depends on the config being in sync with the discovery manager so + // we wait until the config is fully loaded. + <-reloadReady.C + + err := scrapeManager.Run(discoveryManagerScrape.SyncCh()) + level.Info(logger).Log("msg", "Scrape manager stopped") + return err + }, + func(err error) { + // Scrape manager needs to be stopped before closing the local TSDB + // so that it doesn't try to write samples to a closed storage. + level.Info(logger).Log("msg", "Stopping scrape manager...") + scrapeManager.Stop() + }, + ) + } + { + // Reload handler. + + // Make sure that sighup handler is registered with a redirect to the channel before the potentially + // long and synchronous tsdb init. 
+ hup := make(chan os.Signal) + signal.Notify(hup, syscall.SIGHUP) + cancel := make(chan struct{}) + g.Add( + func() error { + <-reloadReady.C + + for { + select { + case <-hup: + if err := reloadConfig(cfg.configFile, logger, reloaders...); err != nil { + level.Error(logger).Log("msg", "Error reloading config", "err", err) + } + case rc := <-webHandler.Reload(): + if err := reloadConfig(cfg.configFile, logger, reloaders...); err != nil { + level.Error(logger).Log("msg", "Error reloading config", "err", err) + rc <- err + } else { + rc <- nil + } + case <-cancel: + return nil + } + } + + }, + func(err error) { + close(cancel) + }, + ) + } + { + // Initial configuration loading. + cancel := make(chan struct{}) + g.Add( + func() error { + select { + case <-dbOpen: + break + // In case a shutdown is initiated before the dbOpen is released + case <-cancel: + reloadReady.Close() + return nil + } + + if err := reloadConfig(cfg.configFile, logger, reloaders...); err != nil { + return fmt.Errorf("Error loading config %s", err) + } + + reloadReady.Close() + + webHandler.Ready() + level.Info(logger).Log("msg", "Server is ready to receive web requests.") + <-cancel + return nil + }, + func(err error) { + close(cancel) + }, + ) + } + { + // TSDB. + cancel := make(chan struct{}) + g.Add( + func() error { + level.Info(logger).Log("msg", "Starting TSDB ...") + db, err := tsdb.Open( + cfg.localStoragePath, + log.With(logger, "component", "tsdb"), + prometheus.DefaultRegisterer, + &cfg.tsdb, + ) + if err != nil { + return fmt.Errorf("Opening storage failed %s", err) + } + level.Info(logger).Log("msg", "TSDB started") + + startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000) + localStorage.Set(db, startTimeMargin) + close(dbOpen) + <-cancel + return nil + }, + func(err error) { + if err := fanoutStorage.Close(); err != nil { + level.Error(logger).Log("msg", "Error stopping storage", "err", err) + } + close(cancel) + }, + ) + } + { + // Web handler. 
+ g.Add( + func() error { + if err := webHandler.Run(ctxWeb); err != nil { + return fmt.Errorf("Error starting web server: %s", err) + } + return nil + }, + func(err error) { + // Keep this interrupt before the ruleManager.Stop(). + // Shutting down the query engine before the rule manager will cause pending queries + // to be canceled and ensures a quick shutdown of the rule manager. + cancelWeb() + }, + ) + } + { + // Rule manager. + + // TODO(krasi) refactor ruleManager.Run() to be blocking to avoid using an extra blocking channel. + cancel := make(chan struct{}) + g.Add( + func() error { + ruleManager.Run() + <-cancel + return nil + }, + func(err error) { + ruleManager.Stop() + close(cancel) + }, + ) + } + { + // Notifier. + + // Calling notifier.Stop() before ruleManager.Stop() will cause a panic if the ruleManager isn't running, + // so keep this interrupt after the ruleManager.Stop(). + g.Add( + func() error { + // When the notifier manager receives a new targets list + // it needs to read a valid config for each job. + // It depends on the config being in sync with the discovery manager + // so we wait until the config is fully loaded. 
+ <-reloadReady.C + + notifier.Run(discoveryManagerNotify.SyncCh()) + level.Info(logger).Log("msg", "Notifier manager stopped") + return nil + }, + func(err error) { + notifier.Stop() + }, + ) + } + if err := g.Run(); err != nil { + level.Error(logger).Log("err", err) + } + level.Info(logger).Log("msg", "See you next time!") +} + +func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config) error) (err error) { + level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) + + defer func() { + if err == nil { + configSuccess.Set(1) + configSuccessTime.SetToCurrentTime() + } else { + configSuccess.Set(0) + } + }() + + conf, err := config.LoadFile(filename) + if err != nil { + return fmt.Errorf("couldn't load configuration (--config.file=%s): %v", filename, err) + } + + failed := false + for _, rl := range rls { + if err := rl(conf); err != nil { + level.Error(logger).Log("msg", "Failed to apply configuration", "err", err) + failed = true + } + } + if failed { + return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%s)", filename) + } + return nil +} + +func startsOrEndsWithQuote(s string) bool { + return strings.HasPrefix(s, "\"") || strings.HasPrefix(s, "'") || + strings.HasSuffix(s, "\"") || strings.HasSuffix(s, "'") +} + +// computeExternalURL computes a sanitized external URL from a raw input. It infers unset +// URL parts from the OS and the given listen address. 
+func computeExternalURL(u, listenAddr string) (*url.URL, error) { + if u == "" { + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + _, port, err := net.SplitHostPort(listenAddr) + if err != nil { + return nil, err + } + u = fmt.Sprintf("http://%s:%s/", hostname, port) + } + + if startsOrEndsWithQuote(u) { + return nil, fmt.Errorf("URL must not begin or end with quotes") + } + + eu, err := url.Parse(u) + if err != nil { + return nil, err + } + + ppref := strings.TrimRight(eu.Path, "/") + if ppref != "" && !strings.HasPrefix(ppref, "/") { + ppref = "/" + ppref + } + eu.Path = ppref + + return eu, nil +} + +// sendAlerts implements the rules.NotifyFunc for a Notifier. +// It filters any non-firing alerts from the input. +func sendAlerts(n *notifier.Manager, externalURL string) rules.NotifyFunc { + return func(ctx context.Context, expr string, alerts ...*rules.Alert) error { + var res []*notifier.Alert + + for _, alert := range alerts { + // Only send actually firing alerts. + if alert.State == rules.StatePending { + continue + } + a := ¬ifier.Alert{ + StartsAt: alert.FiredAt, + Labels: alert.Labels, + Annotations: alert.Annotations, + GeneratorURL: externalURL + strutil.TableLinkForExpression(expr), + } + if !alert.ResolvedAt.IsZero() { + a.EndsAt = alert.ResolvedAt + } + res = append(res, a) + } + + if len(alerts) > 0 { + n.Send(res...) + } + return nil + } +} diff --git a/src/prometheus/cmd/prometheus/main_test.go b/src/prometheus/cmd/prometheus/main_test.go new file mode 100644 index 0000000..ee80567 --- /dev/null +++ b/src/prometheus/cmd/prometheus/main_test.go @@ -0,0 +1,157 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "flag" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/prometheus/prometheus/util/testutil" +) + +var promPath string +var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml") +var promData = filepath.Join(os.TempDir(), "data") + +func TestMain(m *testing.M) { + flag.Parse() + if testing.Short() { + os.Exit(m.Run()) + } + // On linux with a global proxy the tests will fail as the go client(http,grpc) tries to connect through the proxy. + os.Setenv("no_proxy", "localhost,127.0.0.1,0.0.0.0,:") + + var err error + promPath, err = os.Getwd() + if err != nil { + fmt.Printf("can't get current dir :%s \n", err) + os.Exit(1) + } + promPath = filepath.Join(promPath, "prometheus") + + build := exec.Command("go", "build", "-o", promPath) + output, err := build.CombinedOutput() + if err != nil { + fmt.Printf("compilation error :%s \n", output) + os.Exit(1) + } + + exitCode := m.Run() + os.Remove(promPath) + os.RemoveAll(promData) + os.Exit(exitCode) +} + +// As soon as prometheus starts responding to http request should be able to accept Interrupt signals for a graceful shutdown. 
+func TestStartupInterrupt(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + prom := exec.Command(promPath, "--config.file="+promConfig, "--storage.tsdb.path="+promData) + err := prom.Start() + if err != nil { + t.Errorf("execution error: %v", err) + return + } + + done := make(chan error) + go func() { + done <- prom.Wait() + }() + + var startedOk bool + var stoppedErr error + +Loop: + for x := 0; x < 10; x++ { + // error=nil means prometheus has started so can send the interrupt signal and wait for the grace shutdown. + if _, err := http.Get("http://localhost:9090/graph"); err == nil { + startedOk = true + prom.Process.Signal(os.Interrupt) + select { + case stoppedErr = <-done: + break Loop + case <-time.After(10 * time.Second): + } + break Loop + } + time.Sleep(500 * time.Millisecond) + } + + if !startedOk { + t.Errorf("prometheus didn't start in the specified timeout") + return + } + if err := prom.Process.Kill(); err == nil { + t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal") + } else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected! 
+ t.Errorf("prometheus exited with an unexpected error:%v", stoppedErr) + } +} + +func TestComputeExternalURL(t *testing.T) { + tests := []struct { + input string + valid bool + }{ + { + input: "", + valid: true, + }, + { + input: "http://proxy.com/prometheus", + valid: true, + }, + { + input: "'https://url/prometheus'", + valid: false, + }, + { + input: "'relative/path/with/quotes'", + valid: false, + }, + { + input: "http://alertmanager.company.com", + valid: true, + }, + { + input: "https://double--dash.de", + valid: true, + }, + { + input: "'http://starts/with/quote", + valid: false, + }, + { + input: "ends/with/quote\"", + valid: false, + }, + } + + for _, test := range tests { + _, err := computeExternalURL(test.input, "0.0.0.0:9090") + if test.valid { + testutil.Ok(t, err) + } else { + testutil.NotOk(t, err, "input=%q", test.input) + } + } +} diff --git a/src/prometheus/cmd/prometheus/uname_default.go b/src/prometheus/cmd/prometheus/uname_default.go new file mode 100644 index 0000000..bc7f050 --- /dev/null +++ b/src/prometheus/cmd/prometheus/uname_default.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux + +package main + +import "runtime" + +// Uname for any platform other than linux. 
+func Uname() string { + return "(" + runtime.GOOS + ")" +} diff --git a/src/prometheus/cmd/prometheus/uname_linux.go b/src/prometheus/cmd/prometheus/uname_linux.go new file mode 100644 index 0000000..4a43056 --- /dev/null +++ b/src/prometheus/cmd/prometheus/uname_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + "syscall" +) + +// Uname returns the uname of the host machine. +func Uname() string { + buf := syscall.Utsname{} + err := syscall.Uname(&buf) + if err != nil { + log.Fatal("Error!") + } + + str := "(" + charsToString(buf.Sysname[:]) + str += " " + charsToString(buf.Release[:]) + str += " " + charsToString(buf.Version[:]) + str += " " + charsToString(buf.Machine[:]) + str += " " + charsToString(buf.Nodename[:]) + str += " " + charsToString(buf.Domainname[:]) + ")" + return str +} diff --git a/src/prometheus/cmd/prometheus/uname_linux_int8.go b/src/prometheus/cmd/prometheus/uname_linux_int8.go new file mode 100644 index 0000000..210e55c --- /dev/null +++ b/src/prometheus/cmd/prometheus/uname_linux_int8.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build 386 amd64 arm64 mips64 mips64le mips mipsle +// +build linux + +package main + +func charsToString(ca []int8) string { + s := make([]byte, 0, len(ca)) + for _, c := range ca { + if byte(c) == 0 { + break + } + s = append(s, byte(c)) + } + return string(s) +} diff --git a/src/prometheus/cmd/prometheus/uname_linux_uint8.go b/src/prometheus/cmd/prometheus/uname_linux_uint8.go new file mode 100644 index 0000000..e728292 --- /dev/null +++ b/src/prometheus/cmd/prometheus/uname_linux_uint8.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build arm ppc64 ppc64le s390x +// +build linux + +package main + +func charsToString(ca []uint8) string { + s := make([]byte, 0, len(ca)) + for _, c := range ca { + if byte(c) == 0 { + break + } + s = append(s, byte(c)) + } + return string(s) +} diff --git a/src/prometheus/cmd/promtool/main.go b/src/prometheus/cmd/promtool/main.go new file mode 100644 index 0000000..33ac0f6 --- /dev/null +++ b/src/prometheus/cmd/promtool/main.go @@ -0,0 +1,449 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "math" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "gopkg.in/alecthomas/kingpin.v2" + "gopkg.in/yaml.v2" + + "github.com/prometheus/client_golang/api" + "github.com/prometheus/client_golang/api/prometheus/v1" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/common/version" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/pkg/rulefmt" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/util/promlint" +) + +func main() { + app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.") + app.Version(version.Print("promtool")) + app.HelpFlag.Short('h') + + checkCmd := app.Command("check", "Check the resources for validity.") + + checkConfigCmd := checkCmd.Command("config", "Check if the config files are valid or not.") + configFiles := checkConfigCmd.Arg( + "config-files", + "The config files to check.", + ).Required().ExistingFiles() + + checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") + ruleFiles := checkRulesCmd.Arg( + "rule-files", + "The rule files to check.", + ).Required().ExistingFiles() + + checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage) + + updateCmd := app.Command("update", "Update the resources to newer formats.") + updateRulesCmd := updateCmd.Command("rules", "Update rules from the 1.x to 2.x format.") + ruleFilesUp := updateRulesCmd.Arg("rule-files", "The rule files to update.").Required().ExistingFiles() + + queryCmd := app.Command("query", "Run query against a Prometheus server.") + queryInstantCmd := queryCmd.Command("instant", "Run instant query.") + queryServer := queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URL() + queryExpr := queryInstantCmd.Arg("expr", "PromQL query expression.").Required().String() + + queryRangeCmd 
:= queryCmd.Command("range", "Run range query.") + queryRangeServer := queryRangeCmd.Arg("server", "Prometheus server to query.").Required().URL() + queryRangeExpr := queryRangeCmd.Arg("expr", "PromQL query expression.").Required().String() + queryRangeBegin := queryRangeCmd.Flag("start", "Query range start time (RFC3339 or Unix timestamp).").String() + queryRangeEnd := queryRangeCmd.Flag("end", "Query range end time (RFC3339 or Unix timestamp).").String() + + switch kingpin.MustParse(app.Parse(os.Args[1:])) { + case checkConfigCmd.FullCommand(): + os.Exit(CheckConfig(*configFiles...)) + + case checkRulesCmd.FullCommand(): + os.Exit(CheckRules(*ruleFiles...)) + + case checkMetricsCmd.FullCommand(): + os.Exit(CheckMetrics()) + + case updateRulesCmd.FullCommand(): + os.Exit(UpdateRules(*ruleFilesUp...)) + + case queryInstantCmd.FullCommand(): + os.Exit(QueryInstant(*queryServer, *queryExpr)) + + case queryRangeCmd.FullCommand(): + os.Exit(QueryRange(*queryRangeServer, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd)) + } + +} + +// CheckConfig validates configuration files. +func CheckConfig(files ...string) int { + failed := false + + for _, f := range files { + ruleFiles, err := checkConfig(f) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + failed = true + } else { + fmt.Printf(" SUCCESS: %d rule files found\n", len(ruleFiles)) + } + fmt.Println() + + for _, rf := range ruleFiles { + if n, err := checkRules(rf); err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + failed = true + } else { + fmt.Printf(" SUCCESS: %d rules found\n", n) + } + fmt.Println() + } + } + if failed { + return 1 + } + return 0 +} + +func checkFileExists(fn string) error { + // Nothing set, nothing to error on. 
+ if fn == "" { + return nil + } + _, err := os.Stat(fn) + return err +} + +func checkConfig(filename string) ([]string, error) { + fmt.Println("Checking", filename) + + cfg, err := config.LoadFile(filename) + if err != nil { + return nil, err + } + + var ruleFiles []string + for _, rf := range cfg.RuleFiles { + rfs, err := filepath.Glob(rf) + if err != nil { + return nil, err + } + // If an explicit file was given, error if it is not accessible. + if !strings.Contains(rf, "*") { + if len(rfs) == 0 { + return nil, fmt.Errorf("%q does not point to an existing file", rf) + } + if err := checkFileExists(rfs[0]); err != nil { + return nil, fmt.Errorf("error checking rule file %q: %s", rfs[0], err) + } + } + ruleFiles = append(ruleFiles, rfs...) + } + + for _, scfg := range cfg.ScrapeConfigs { + if err := checkFileExists(scfg.HTTPClientConfig.BearerTokenFile); err != nil { + return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.HTTPClientConfig.BearerTokenFile, err) + } + + if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig); err != nil { + return nil, err + } + + for _, kd := range scfg.ServiceDiscoveryConfig.KubernetesSDConfigs { + if err := checkTLSConfig(kd.TLSConfig); err != nil { + return nil, err + } + } + + for _, filesd := range scfg.ServiceDiscoveryConfig.FileSDConfigs { + for _, file := range filesd.Files { + files, err := filepath.Glob(file) + if err != nil { + return nil, err + } + if len(files) != 0 { + // There was at least one match for the glob and we can assume checkFileExists + // for all matches would pass, we can continue the loop. 
+ continue + } + fmt.Printf(" WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName) + } + } + } + + return ruleFiles, nil +} + +func checkTLSConfig(tlsConfig config_util.TLSConfig) error { + if err := checkFileExists(tlsConfig.CertFile); err != nil { + return fmt.Errorf("error checking client cert file %q: %s", tlsConfig.CertFile, err) + } + if err := checkFileExists(tlsConfig.KeyFile); err != nil { + return fmt.Errorf("error checking client key file %q: %s", tlsConfig.KeyFile, err) + } + + if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 { + return fmt.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile) + } + if len(tlsConfig.KeyFile) > 0 && len(tlsConfig.CertFile) == 0 { + return fmt.Errorf("client key file %q specified without client cert file", tlsConfig.KeyFile) + } + + return nil +} + +// CheckRules validates rule files. +func CheckRules(files ...string) int { + failed := false + + for _, f := range files { + if n, errs := checkRules(f); errs != nil { + fmt.Fprintln(os.Stderr, " FAILED:") + for _, e := range errs { + fmt.Fprintln(os.Stderr, e.Error()) + } + failed = true + } else { + fmt.Printf(" SUCCESS: %d rules found\n", n) + } + fmt.Println() + } + if failed { + return 1 + } + return 0 +} + +func checkRules(filename string) (int, []error) { + fmt.Println("Checking", filename) + + rgs, errs := rulefmt.ParseFile(filename) + if errs != nil { + return 0, errs + } + + numRules := 0 + for _, rg := range rgs.Groups { + numRules += len(rg.Rules) + } + + return numRules, nil +} + +// UpdateRules updates the rule files. 
+func UpdateRules(files ...string) int { + failed := false + + for _, f := range files { + if err := updateRules(f); err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + failed = true + } + } + + if failed { + return 1 + } + return 0 +} + +func updateRules(filename string) error { + fmt.Println("Updating", filename) + + content, err := ioutil.ReadFile(filename) + if err != nil { + return err + } + + rules, err := promql.ParseStmts(string(content)) + if err != nil { + return err + } + + yamlRG := &rulefmt.RuleGroups{ + Groups: []rulefmt.RuleGroup{{ + Name: filename, + }}, + } + + yamlRules := make([]rulefmt.Rule, 0, len(rules)) + + for _, rule := range rules { + switch r := rule.(type) { + case *promql.AlertStmt: + yamlRules = append(yamlRules, rulefmt.Rule{ + Alert: r.Name, + Expr: r.Expr.String(), + For: model.Duration(r.Duration), + Labels: r.Labels.Map(), + Annotations: r.Annotations.Map(), + }) + case *promql.RecordStmt: + yamlRules = append(yamlRules, rulefmt.Rule{ + Record: r.Name, + Expr: r.Expr.String(), + Labels: r.Labels.Map(), + }) + default: + panic("unknown statement type") + } + } + + yamlRG.Groups[0].Rules = yamlRules + y, err := yaml.Marshal(yamlRG) + if err != nil { + return err + } + + return ioutil.WriteFile(filename+".yml", y, 0666) +} + +var checkMetricsUsage = strings.TrimSpace(` +Pass Prometheus metrics over stdin to lint them for consistency and correctness. + +examples: + +$ cat metrics.prom | promtool check metrics + +$ curl -s http://localhost:9090/metrics | promtool check metrics +`) + +// CheckMetrics performs a linting pass on input metrics. +func CheckMetrics() int { + l := promlint.New(os.Stdin) + problems, err := l.Lint() + if err != nil { + fmt.Fprintln(os.Stderr, "error while linting:", err) + return 1 + } + + for _, p := range problems { + fmt.Fprintln(os.Stderr, p.Metric, p.Text) + } + + if len(problems) > 0 { + return 3 + } + + return 0 +} + +// QueryInstant performs an instant query against a Prometheus server. 
+func QueryInstant(url *url.URL, query string) int {
+	config := api.Config{
+		Address: url.String(),
+	}
+
+	// Create new client.
+	c, err := api.NewClient(config)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "error creating API client:", err)
+		return 1
+	}
+
+	// Run query against client.
+	api := v1.NewAPI(c)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+	val, err := api.Query(ctx, query, time.Now())
+	cancel()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "query error:", err)
+		return 1
+	}
+
+	fmt.Println(val.String())
+
+	return 0
+}
+
+// QueryRange performs a range query against a Prometheus server.
+// start and end accept Unix timestamps (with optional fractional seconds)
+// or RFC3339 timestamps; end defaults to now, start to end minus 5 minutes.
+// It returns 0 on success and 1 on any argument or query error.
+func QueryRange(url *url.URL, query string, start string, end string) int {
+	config := api.Config{
+		Address: url.String(),
+	}
+
+	// Create new client.
+	c, err := api.NewClient(config)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "error creating API client:", err)
+		return 1
+	}
+
+	var stime, etime time.Time
+
+	if end == "" {
+		etime = time.Now()
+	} else {
+		etime, err = parseTime(end)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "error parsing end time:", err)
+			return 1
+		}
+	}
+
+	if start == "" {
+		stime = etime.Add(-5 * time.Minute)
+	} else {
+		stime, err = parseTime(start)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "error parsing start time:", err)
+			// Abort instead of silently querying from the zero time.
+			return 1
+		}
+	}
+
+	if !stime.Before(etime) {
+		fmt.Fprintln(os.Stderr, "start time is not before end time")
+		// An empty or inverted range is a usage error; do not query.
+		return 1
+	}
+
+	// Cap the result at roughly 250 points across the range, with a
+	// minimum step of one second.
+	resolution := math.Max(math.Floor(etime.Sub(stime).Seconds()/250), 1)
+	// Convert seconds to nanoseconds such that time.Duration parses correctly.
+	step := time.Duration(resolution * 1e9)
+
+	// Run query against client.
+	api := v1.NewAPI(c)
+	r := v1.Range{Start: stime, End: etime, Step: step}
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+	val, err := api.QueryRange(ctx, query, r)
+	cancel()
+
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "query error:", err)
+		return 1
+	}
+
+	fmt.Println(val.String())
+	return 0
+}
+
+// parseTime parses s either as a Unix timestamp (with optional fractional
+// seconds) or as an RFC3339/RFC3339Nano timestamp.
+func parseTime(s string) (time.Time, error) {
+	if t, err := strconv.ParseFloat(s, 64); err == nil {
+		s, ns := math.Modf(t)
+		return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
+	}
+	if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
+		return t, nil
+	}
+	return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
+}
diff --git a/src/prometheus/code-of-conduct.md b/src/prometheus/code-of-conduct.md
new file mode 100644
index 0000000..9a1aff4
--- /dev/null
+++ b/src/prometheus/code-of-conduct.md
@@ -0,0 +1,3 @@
+## Prometheus Community Code of Conduct
+
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/src/prometheus/config/config.go b/src/prometheus/config/config.go
new file mode 100644
index 0000000..b7a5667
--- /dev/null
+++ b/src/prometheus/config/config.go
@@ -0,0 +1,693 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package config + +import ( + "fmt" + "io/ioutil" + "net/url" + "path/filepath" + "regexp" + "strings" + "time" + + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + sd_config "github.com/prometheus/prometheus/discovery/config" + "gopkg.in/yaml.v2" +) + +var ( + patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`) + relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) +) + +// Load parses the YAML input s into a Config. +func Load(s string) (*Config, error) { + cfg := &Config{} + // If the entire config body is empty the UnmarshalYAML method is + // never called. We thus have to set the DefaultConfig at the entry + // point as well. + *cfg = DefaultConfig + + err := yaml.UnmarshalStrict([]byte(s), cfg) + if err != nil { + return nil, err + } + cfg.original = s + return cfg, nil +} + +// LoadFile parses the given YAML file into a Config. +func LoadFile(filename string) (*Config, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + cfg, err := Load(string(content)) + if err != nil { + return nil, fmt.Errorf("parsing YAML file %s: %v", filename, err) + } + resolveFilepaths(filepath.Dir(filename), cfg) + return cfg, nil +} + +// The defaults applied before parsing the respective config sections. +var ( + // DefaultConfig is the default top-level configuration. + DefaultConfig = Config{ + GlobalConfig: DefaultGlobalConfig, + } + + // DefaultGlobalConfig is the default global configuration. + DefaultGlobalConfig = GlobalConfig{ + ScrapeInterval: model.Duration(1 * time.Minute), + ScrapeTimeout: model.Duration(10 * time.Second), + EvaluationInterval: model.Duration(1 * time.Minute), + } + + // DefaultScrapeConfig is the default scrape configuration. + DefaultScrapeConfig = ScrapeConfig{ + // ScrapeTimeout and ScrapeInterval default to the + // configured globals. 
+ MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + } + + // DefaultAlertmanagerConfig is the default alertmanager configuration. + DefaultAlertmanagerConfig = AlertmanagerConfig{ + Scheme: "http", + Timeout: 10 * time.Second, + } + + // DefaultRelabelConfig is the default Relabel configuration. + DefaultRelabelConfig = RelabelConfig{ + Action: RelabelReplace, + Separator: ";", + Regex: MustNewRegexp("(.*)"), + Replacement: "$1", + } + + // DefaultRemoteWriteConfig is the default remote write configuration. + DefaultRemoteWriteConfig = RemoteWriteConfig{ + RemoteTimeout: model.Duration(30 * time.Second), + QueueConfig: DefaultQueueConfig, + } + + // DefaultQueueConfig is the default remote queue configuration. + DefaultQueueConfig = QueueConfig{ + // With a maximum of 1000 shards, assuming an average of 100ms remote write + // time and 100 samples per batch, we will be able to push 1M samples/s. + MaxShards: 1000, + MaxSamplesPerSend: 100, + + // By default, buffer 1000 batches, which at 100ms per batch is 1:40mins. At + // 1000 shards, this will buffer 100M samples total. + Capacity: 100 * 1000, + BatchSendDeadline: 5 * time.Second, + + // Max number of times to retry a batch on recoverable errors. + MaxRetries: 10, + MinBackoff: 30 * time.Millisecond, + MaxBackoff: 100 * time.Millisecond, + } + + // DefaultRemoteReadConfig is the default remote read configuration. + DefaultRemoteReadConfig = RemoteReadConfig{ + RemoteTimeout: model.Duration(1 * time.Minute), + } +) + +// Config is the top-level configuration for Prometheus's config files. 
+type Config struct { + GlobalConfig GlobalConfig `yaml:"global"` + AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` + RuleFiles []string `yaml:"rule_files,omitempty"` + ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` + + RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` + RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` + + // original is the input from which the config was parsed. + original string +} + +// resolveFilepaths joins all relative paths in a configuration +// with a given base directory. +func resolveFilepaths(baseDir string, cfg *Config) { + join := func(fp string) string { + if len(fp) > 0 && !filepath.IsAbs(fp) { + fp = filepath.Join(baseDir, fp) + } + return fp + } + + for i, rf := range cfg.RuleFiles { + cfg.RuleFiles[i] = join(rf) + } + + clientPaths := func(scfg *config_util.HTTPClientConfig) { + scfg.BearerTokenFile = join(scfg.BearerTokenFile) + scfg.TLSConfig.CAFile = join(scfg.TLSConfig.CAFile) + scfg.TLSConfig.CertFile = join(scfg.TLSConfig.CertFile) + scfg.TLSConfig.KeyFile = join(scfg.TLSConfig.KeyFile) + } + sdPaths := func(cfg *sd_config.ServiceDiscoveryConfig) { + for _, kcfg := range cfg.KubernetesSDConfigs { + kcfg.BearerTokenFile = join(kcfg.BearerTokenFile) + kcfg.TLSConfig.CAFile = join(kcfg.TLSConfig.CAFile) + kcfg.TLSConfig.CertFile = join(kcfg.TLSConfig.CertFile) + kcfg.TLSConfig.KeyFile = join(kcfg.TLSConfig.KeyFile) + } + for _, mcfg := range cfg.MarathonSDConfigs { + mcfg.AuthTokenFile = join(mcfg.AuthTokenFile) + mcfg.HTTPClientConfig.BearerTokenFile = join(mcfg.HTTPClientConfig.BearerTokenFile) + mcfg.HTTPClientConfig.TLSConfig.CAFile = join(mcfg.HTTPClientConfig.TLSConfig.CAFile) + mcfg.HTTPClientConfig.TLSConfig.CertFile = join(mcfg.HTTPClientConfig.TLSConfig.CertFile) + mcfg.HTTPClientConfig.TLSConfig.KeyFile = join(mcfg.HTTPClientConfig.TLSConfig.KeyFile) + } + for _, consulcfg := range cfg.ConsulSDConfigs { + consulcfg.TLSConfig.CAFile = 
join(consulcfg.TLSConfig.CAFile) + consulcfg.TLSConfig.CertFile = join(consulcfg.TLSConfig.CertFile) + consulcfg.TLSConfig.KeyFile = join(consulcfg.TLSConfig.KeyFile) + } + for _, filecfg := range cfg.FileSDConfigs { + for i, fn := range filecfg.Files { + filecfg.Files[i] = join(fn) + } + } + } + + for _, cfg := range cfg.ScrapeConfigs { + clientPaths(&cfg.HTTPClientConfig) + sdPaths(&cfg.ServiceDiscoveryConfig) + } + for _, cfg := range cfg.AlertingConfig.AlertmanagerConfigs { + clientPaths(&cfg.HTTPClientConfig) + sdPaths(&cfg.ServiceDiscoveryConfig) + } +} + +func (c Config) String() string { + b, err := yaml.Marshal(c) + if err != nil { + return fmt.Sprintf("", err) + } + return string(b) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultConfig + // We want to set c to the defaults and then overwrite it with the input. + // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML + // again, we have to hide it using a type indirection. + type plain Config + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + // If a global block was open but empty the default global config is overwritten. + // We have to restore it here. + if c.GlobalConfig.isZero() { + c.GlobalConfig = DefaultGlobalConfig + } + + for _, rf := range c.RuleFiles { + if !patRulePath.MatchString(rf) { + return fmt.Errorf("invalid rule file path %q", rf) + } + } + // Do global overrides and validate unique names. + jobNames := map[string]struct{}{} + for _, scfg := range c.ScrapeConfigs { + // First set the correct scrape interval, then check that the timeout + // (inferred or explicit) is not greater than that. 
+ if scfg.ScrapeInterval == 0 { + scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval + } + if scfg.ScrapeTimeout > scfg.ScrapeInterval { + return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) + } + if scfg.ScrapeTimeout == 0 { + if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval { + scfg.ScrapeTimeout = scfg.ScrapeInterval + } else { + scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout + } + } + + if _, ok := jobNames[scfg.JobName]; ok { + return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) + } + jobNames[scfg.JobName] = struct{}{} + } + return nil +} + +// GlobalConfig configures values that are used across other configuration +// objects. +type GlobalConfig struct { + // How frequently to scrape targets by default. + ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` + // The default timeout when scraping targets. + ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // How frequently to evaluate rules by default. + EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` + // The labels to add to any timeseries that this Prometheus instance scrapes. + ExternalLabels model.LabelSet `yaml:"external_labels,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Create a clean global config as the previous one was already populated + // by the default due to the YAML parser behavior for empty blocks. + gc := &GlobalConfig{} + type plain GlobalConfig + if err := unmarshal((*plain)(gc)); err != nil { + return err + } + + // First set the correct scrape interval, then check that the timeout + // (inferred or explicit) is not greater than that. 
+ if gc.ScrapeInterval == 0 { + gc.ScrapeInterval = DefaultGlobalConfig.ScrapeInterval + } + if gc.ScrapeTimeout > gc.ScrapeInterval { + return fmt.Errorf("global scrape timeout greater than scrape interval") + } + if gc.ScrapeTimeout == 0 { + if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval { + gc.ScrapeTimeout = gc.ScrapeInterval + } else { + gc.ScrapeTimeout = DefaultGlobalConfig.ScrapeTimeout + } + } + if gc.EvaluationInterval == 0 { + gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval + } + *c = *gc + return nil +} + +// isZero returns true iff the global config is the zero value. +func (c *GlobalConfig) isZero() bool { + return c.ExternalLabels == nil && + c.ScrapeInterval == 0 && + c.ScrapeTimeout == 0 && + c.EvaluationInterval == 0 +} + +// ScrapeConfig configures a scraping unit for Prometheus. +type ScrapeConfig struct { + // The job name to which the job label is set by default. + JobName string `yaml:"job_name"` + // Indicator whether the scraped metrics should remain unmodified. + HonorLabels bool `yaml:"honor_labels,omitempty"` + // A set of query parameters with which the target is scraped. + Params url.Values `yaml:"params,omitempty"` + // How frequently to scrape the targets of this scrape config. + ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` + // The timeout for scraping targets of this config. + ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // The HTTP resource path on which to fetch metrics from targets. + MetricsPath string `yaml:"metrics_path,omitempty"` + // The URL scheme with which to fetch metrics from targets. + Scheme string `yaml:"scheme,omitempty"` + // More than this many samples post metric-relabelling will cause the scrape to fail. + SampleLimit uint `yaml:"sample_limit,omitempty"` + + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. 
+ + ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"` + HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` + + // List of target relabel configurations. + RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` + // List of metric relabel configurations. + MetricRelabelConfigs []*RelabelConfig `yaml:"metric_relabel_configs,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultScrapeConfig + type plain ScrapeConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if len(c.JobName) == 0 { + return fmt.Errorf("job_name is empty") + } + + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. + // We cannot make it a pointer as the parser panics for inlined pointer structs. + // Thus we just do its validation here. + if err := c.HTTPClientConfig.Validate(); err != nil { + return err + } + + // Check for users putting URLs in target groups. + if len(c.RelabelConfigs) == 0 { + for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs { + for _, t := range tg.Targets { + if err := CheckTargetAddress(t[model.AddressLabel]); err != nil { + return err + } + } + } + } + + // Add index to the static config target groups for unique identification + // within scrape pool. + for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs { + tg.Source = fmt.Sprintf("%d", i) + } + + return nil +} + +// AlertingConfig configures alerting and alertmanager related configs. +type AlertingConfig struct { + AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"` + AlertmanagerConfigs []*AlertmanagerConfig `yaml:"alertmanagers,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Create a clean global config as the previous one was already populated + // by the default due to the YAML parser behavior for empty blocks. + *c = AlertingConfig{} + type plain AlertingConfig + return unmarshal((*plain)(c)) +} + +// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. +type AlertmanagerConfig struct { + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + + ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"` + HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` + + // The URL scheme to use when talking to Alertmanagers. + Scheme string `yaml:"scheme,omitempty"` + // Path prefix to add in front of the push endpoint path. + PathPrefix string `yaml:"path_prefix,omitempty"` + // The timeout used when sending alerts. + Timeout time.Duration `yaml:"timeout,omitempty"` + + // List of Alertmanager relabel configurations. + RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultAlertmanagerConfig + type plain AlertmanagerConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. + // We cannot make it a pointer as the parser panics for inlined pointer structs. + // Thus we just do its validation here. + if err := c.HTTPClientConfig.Validate(); err != nil { + return err + } + + // Check for users putting URLs in target groups. 
+ if len(c.RelabelConfigs) == 0 { + for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs { + for _, t := range tg.Targets { + if err := CheckTargetAddress(t[model.AddressLabel]); err != nil { + return err + } + } + } + } + + // Add index to the static config target groups for unique identification + // within scrape pool. + for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs { + tg.Source = fmt.Sprintf("%d", i) + } + + return nil +} + +// CheckTargetAddress checks if target address is valid. +func CheckTargetAddress(address model.LabelValue) error { + // For now check for a URL, we may want to expand this later. + if strings.Contains(string(address), "/") { + return fmt.Errorf("%q is not a valid hostname", address) + } + return nil +} + +// ClientCert contains client cert credentials. +type ClientCert struct { + Cert string `yaml:"cert"` + Key config_util.Secret `yaml:"key"` +} + +// FileSDConfig is the configuration for file based discovery. +type FileSDConfig struct { + Files []string `yaml:"files"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` +} + +// RelabelAction is the action to be performed on relabeling. +type RelabelAction string + +const ( + // RelabelReplace performs a regex replacement. + RelabelReplace RelabelAction = "replace" + // RelabelKeep drops targets for which the input does not match the regex. + RelabelKeep RelabelAction = "keep" + // RelabelDrop drops targets for which the input does match the regex. + RelabelDrop RelabelAction = "drop" + // RelabelHashMod sets a label to the modulus of a hash of labels. + RelabelHashMod RelabelAction = "hashmod" + // RelabelLabelMap copies labels to other labelnames based on a regex. + RelabelLabelMap RelabelAction = "labelmap" + // RelabelLabelDrop drops any label matching the regex. + RelabelLabelDrop RelabelAction = "labeldrop" + // RelabelLabelKeep drops any label not matching the regex. 
+ RelabelLabelKeep RelabelAction = "labelkeep" +) + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (a *RelabelAction) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + switch act := RelabelAction(strings.ToLower(s)); act { + case RelabelReplace, RelabelKeep, RelabelDrop, RelabelHashMod, RelabelLabelMap, RelabelLabelDrop, RelabelLabelKeep: + *a = act + return nil + } + return fmt.Errorf("unknown relabel action %q", s) +} + +// RelabelConfig is the configuration for relabeling of target label sets. +type RelabelConfig struct { + // A list of labels from which values are taken and concatenated + // with the configured separator in order. + SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"` + // Separator is the string between concatenated values from the source labels. + Separator string `yaml:"separator,omitempty"` + // Regex against which the concatenation is matched. + Regex Regexp `yaml:"regex,omitempty"` + // Modulus to take of the hash of concatenated values from the source labels. + Modulus uint64 `yaml:"modulus,omitempty"` + // TargetLabel is the label to which the resulting string is written in a replacement. + // Regexp interpolation is allowed for the replace action. + TargetLabel string `yaml:"target_label,omitempty"` + // Replacement is the regex replacement pattern to be used. + Replacement string `yaml:"replacement,omitempty"` + // Action is the action to be performed for the relabeling. + Action RelabelAction `yaml:"action,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *RelabelConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultRelabelConfig + type plain RelabelConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + if c.Regex.Regexp == nil { + c.Regex = MustNewRegexp("") + } + if c.Modulus == 0 && c.Action == RelabelHashMod { + return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") + } + if (c.Action == RelabelReplace || c.Action == RelabelHashMod) && c.TargetLabel == "" { + return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) + } + if c.Action == RelabelReplace && !relabelTarget.MatchString(c.TargetLabel) { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if c.Action == RelabelLabelMap && !relabelTarget.MatchString(c.Replacement) { + return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) + } + if c.Action == RelabelHashMod && !model.LabelName(c.TargetLabel).IsValid() { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + + if c.Action == RelabelLabelDrop || c.Action == RelabelLabelKeep { + if c.SourceLabels != nil || + c.TargetLabel != DefaultRelabelConfig.TargetLabel || + c.Modulus != DefaultRelabelConfig.Modulus || + c.Separator != DefaultRelabelConfig.Separator || + c.Replacement != DefaultRelabelConfig.Replacement { + return fmt.Errorf("%s action requires only 'regex', and no other fields", c.Action) + } + } + + return nil +} + +// Regexp encapsulates a regexp.Regexp and makes it YAML marshallable. +type Regexp struct { + *regexp.Regexp + original string +} + +// NewRegexp creates a new anchored Regexp and returns an error if the +// passed-in regular expression does not compile. 
+func NewRegexp(s string) (Regexp, error) { + regex, err := regexp.Compile("^(?:" + s + ")$") + return Regexp{ + Regexp: regex, + original: s, + }, err +} + +// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile. +func MustNewRegexp(s string) Regexp { + re, err := NewRegexp(s) + if err != nil { + panic(err) + } + return re +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + r, err := NewRegexp(s) + if err != nil { + return err + } + *re = r + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (re Regexp) MarshalYAML() (interface{}, error) { + if re.original != "" { + return re.original, nil + } + return nil, nil +} + +// RemoteWriteConfig is the configuration for writing to remote storage. +type RemoteWriteConfig struct { + URL *config_util.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + WriteRelabelConfigs []*RelabelConfig `yaml:"write_relabel_configs,omitempty"` + + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` + QueueConfig QueueConfig `yaml:"queue_config,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultRemoteWriteConfig + type plain RemoteWriteConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + if c.URL == nil { + return fmt.Errorf("url for remote_write is empty") + } + + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. + // We cannot make it a pointer as the parser panics for inlined pointer structs. 
+ // Thus we just do its validation here. + return c.HTTPClientConfig.Validate() +} + +// QueueConfig is the configuration for the queue used to write to remote +// storage. +type QueueConfig struct { + // Number of samples to buffer per shard before we start dropping them. + Capacity int `yaml:"capacity,omitempty"` + + // Max number of shards, i.e. amount of concurrency. + MaxShards int `yaml:"max_shards,omitempty"` + + // Maximum number of samples per send. + MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"` + + // Maximum time sample will wait in buffer. + BatchSendDeadline time.Duration `yaml:"batch_send_deadline,omitempty"` + + // Max number of times to retry a batch on recoverable errors. + MaxRetries int `yaml:"max_retries,omitempty"` + + // On recoverable errors, backoff exponentially. + MinBackoff time.Duration `yaml:"min_backoff,omitempty"` + MaxBackoff time.Duration `yaml:"max_backoff,omitempty"` +} + +// RemoteReadConfig is the configuration for reading from remote storage. +type RemoteReadConfig struct { + URL *config_util.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + ReadRecent bool `yaml:"read_recent,omitempty"` + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` + + // RequiredMatchers is an optional list of equality matchers which have to + // be present in a selector to query the remote read endpoint. + RequiredMatchers model.LabelSet `yaml:"required_matchers,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultRemoteReadConfig + type plain RemoteReadConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + if c.URL == nil { + return fmt.Errorf("url for remote_read is empty") + } + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. + // We cannot make it a pointer as the parser panics for inlined pointer structs. + // Thus we just do its validation here. + return c.HTTPClientConfig.Validate() +} diff --git a/src/prometheus/config/config_default_test.go b/src/prometheus/config/config_default_test.go new file mode 100644 index 0000000..932643b --- /dev/null +++ b/src/prometheus/config/config_default_test.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// +build !windows
+
+package config
+
+// ruleFilesConfigFile is the non-Windows fixture exercising relative and
+// absolute rule_files paths; the Windows variant lives in
+// config_windows_test.go.
+const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"
+
+// ruleFilesExpectedConf is the configuration expected after loading
+// ruleFilesConfigFile: relative rule paths are resolved against the config
+// file's directory while the absolute path is left untouched.
+var ruleFilesExpectedConf = &Config{
+	GlobalConfig: DefaultGlobalConfig,
+	RuleFiles: []string{
+		"testdata/first.rules",
+		"testdata/rules/second.rules",
+		"/absolute/third.rules",
+	},
+	original: "",
+}
diff --git a/src/prometheus/config/config_test.go b/src/prometheus/config/config_test.go
new file mode 100644
index 0000000..549a4f9
--- /dev/null
+++ b/src/prometheus/config/config_test.go
@@ -0,0 +1,796 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package config + +import ( + "encoding/json" + "io/ioutil" + "net/url" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "github.com/prometheus/prometheus/discovery/azure" + "github.com/prometheus/prometheus/discovery/consul" + "github.com/prometheus/prometheus/discovery/dns" + "github.com/prometheus/prometheus/discovery/ec2" + "github.com/prometheus/prometheus/discovery/file" + "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/prometheus/prometheus/discovery/marathon" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/discovery/triton" + "github.com/prometheus/prometheus/discovery/zookeeper" + + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + sd_config "github.com/prometheus/prometheus/discovery/config" + "github.com/prometheus/prometheus/util/testutil" + "gopkg.in/yaml.v2" +) + +func mustParseURL(u string) *config_util.URL { + parsed, err := url.Parse(u) + if err != nil { + panic(err) + } + return &config_util.URL{URL: parsed} +} + +var expectedConf = &Config{ + GlobalConfig: GlobalConfig{ + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EvaluationInterval: model.Duration(30 * time.Second), + + ExternalLabels: model.LabelSet{ + "monitor": "codelab", + "foo": "bar", + }, + }, + + RuleFiles: []string{ + filepath.FromSlash("testdata/first.rules"), + filepath.FromSlash("testdata/my/*.rules"), + }, + + RemoteWriteConfigs: []*RemoteWriteConfig{ + { + URL: mustParseURL("http://remote1/push"), + RemoteTimeout: model.Duration(30 * time.Second), + WriteRelabelConfigs: []*RelabelConfig{ + { + SourceLabels: model.LabelNames{"__name__"}, + Separator: ";", + Regex: MustNewRegexp("expensive.*"), + Replacement: "$1", + Action: RelabelDrop, + }, + }, + QueueConfig: DefaultQueueConfig, + }, + { + URL: mustParseURL("http://remote2/push"), + RemoteTimeout: model.Duration(30 * time.Second), + 
QueueConfig: DefaultQueueConfig, + }, + }, + + RemoteReadConfigs: []*RemoteReadConfig{ + { + URL: mustParseURL("http://remote1/read"), + RemoteTimeout: model.Duration(1 * time.Minute), + ReadRecent: true, + }, + { + URL: mustParseURL("http://remote3/read"), + RemoteTimeout: model.Duration(1 * time.Minute), + ReadRecent: false, + RequiredMatchers: model.LabelSet{"job": "special"}, + }, + }, + + ScrapeConfigs: []*ScrapeConfig{ + { + JobName: "prometheus", + + HonorLabels: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + HTTPClientConfig: config_util.HTTPClientConfig{ + BearerTokenFile: filepath.FromSlash("testdata/valid_token_file"), + }, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + StaticConfigs: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + {model.AddressLabel: "localhost:9191"}, + }, + Labels: model.LabelSet{ + "my": "label", + "your": "label", + }, + Source: "0", + }, + }, + + FileSDConfigs: []*file.SDConfig{ + { + Files: []string{"testdata/foo/*.slow.json", "testdata/foo/*.slow.yml", "testdata/single/file.yml"}, + RefreshInterval: model.Duration(10 * time.Minute), + }, + { + Files: []string{"testdata/bar/*.yaml"}, + RefreshInterval: model.Duration(5 * time.Minute), + }, + }, + }, + + RelabelConfigs: []*RelabelConfig{ + { + SourceLabels: model.LabelNames{"job", "__meta_dns_name"}, + TargetLabel: "job", + Separator: ";", + Regex: MustNewRegexp("(.*)some-[regex]"), + Replacement: "foo-${1}", + Action: RelabelReplace, + }, { + SourceLabels: model.LabelNames{"abc"}, + TargetLabel: "cde", + Separator: ";", + Regex: DefaultRelabelConfig.Regex, + Replacement: DefaultRelabelConfig.Replacement, + Action: RelabelReplace, + }, { + TargetLabel: "abc", + Separator: ";", + Regex: DefaultRelabelConfig.Regex, + Replacement: "static", + Action: 
RelabelReplace, + }, { + TargetLabel: "abc", + Separator: ";", + Regex: MustNewRegexp(""), + Replacement: "static", + Action: RelabelReplace, + }, + }, + }, + { + + JobName: "service-x", + + ScrapeInterval: model.Duration(50 * time.Second), + ScrapeTimeout: model.Duration(5 * time.Second), + SampleLimit: 1000, + + HTTPClientConfig: config_util.HTTPClientConfig{ + BasicAuth: &config_util.BasicAuth{ + Username: "admin_name", + Password: "multiline\nmysecret\ntest", + }, + }, + MetricsPath: "/my_path", + Scheme: "https", + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + DNSSDConfigs: []*dns.SDConfig{ + { + Names: []string{ + "first.dns.address.domain.com", + "second.dns.address.domain.com", + }, + RefreshInterval: model.Duration(15 * time.Second), + Type: "SRV", + }, + { + Names: []string{ + "first.dns.address.domain.com", + }, + RefreshInterval: model.Duration(30 * time.Second), + Type: "SRV", + }, + }, + }, + + RelabelConfigs: []*RelabelConfig{ + { + SourceLabels: model.LabelNames{"job"}, + Regex: MustNewRegexp("(.*)some-[regex]"), + Separator: ";", + Replacement: DefaultRelabelConfig.Replacement, + Action: RelabelDrop, + }, + { + SourceLabels: model.LabelNames{"__address__"}, + TargetLabel: "__tmp_hash", + Regex: DefaultRelabelConfig.Regex, + Replacement: DefaultRelabelConfig.Replacement, + Modulus: 8, + Separator: ";", + Action: RelabelHashMod, + }, + { + SourceLabels: model.LabelNames{"__tmp_hash"}, + Regex: MustNewRegexp("1"), + Separator: ";", + Replacement: DefaultRelabelConfig.Replacement, + Action: RelabelKeep, + }, + { + Regex: MustNewRegexp("1"), + Separator: ";", + Replacement: DefaultRelabelConfig.Replacement, + Action: RelabelLabelMap, + }, + { + Regex: MustNewRegexp("d"), + Separator: ";", + Replacement: DefaultRelabelConfig.Replacement, + Action: RelabelLabelDrop, + }, + { + Regex: MustNewRegexp("k"), + Separator: ";", + Replacement: DefaultRelabelConfig.Replacement, + Action: RelabelLabelKeep, + }, + }, + MetricRelabelConfigs: 
[]*RelabelConfig{ + { + SourceLabels: model.LabelNames{"__name__"}, + Regex: MustNewRegexp("expensive_metric.*"), + Separator: ";", + Replacement: DefaultRelabelConfig.Replacement, + Action: RelabelDrop, + }, + }, + }, + { + JobName: "service-y", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + ConsulSDConfigs: []*consul.SDConfig{ + { + Server: "localhost:1234", + Token: "mysecret", + Services: []string{"nginx", "cache", "mysql"}, + ServiceTag: "canary", + NodeMeta: map[string]string{"rack": "123"}, + TagSeparator: consul.DefaultSDConfig.TagSeparator, + Scheme: "https", + RefreshInterval: consul.DefaultSDConfig.RefreshInterval, + AllowStale: true, + TLSConfig: config_util.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + CAFile: filepath.FromSlash("testdata/valid_ca_file"), + InsecureSkipVerify: false, + }, + }, + }, + }, + + RelabelConfigs: []*RelabelConfig{ + { + SourceLabels: model.LabelNames{"__meta_sd_consul_tags"}, + Regex: MustNewRegexp("label:([^=]+)=([^,]+)"), + Separator: ",", + TargetLabel: "${1}", + Replacement: "${2}", + Action: RelabelReplace, + }, + }, + }, + { + JobName: "service-z", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + + MetricsPath: "/metrics", + Scheme: "http", + + HTTPClientConfig: config_util.HTTPClientConfig{ + TLSConfig: config_util.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + }, + + BearerToken: "mysecret", + }, + }, + { + JobName: "service-kubernetes", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: 
DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + KubernetesSDConfigs: []*kubernetes.SDConfig{ + { + APIServer: kubernetesSDHostURL(), + Role: kubernetes.RoleEndpoint, + BasicAuth: &config_util.BasicAuth{ + Username: "myusername", + Password: "mysecret", + }, + NamespaceDiscovery: kubernetes.NamespaceDiscovery{}, + }, + }, + }, + }, + { + JobName: "service-kubernetes-namespaces", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + KubernetesSDConfigs: []*kubernetes.SDConfig{ + { + APIServer: kubernetesSDHostURL(), + Role: kubernetes.RoleEndpoint, + NamespaceDiscovery: kubernetes.NamespaceDiscovery{ + Names: []string{ + "default", + }, + }, + }, + }, + }, + }, + { + JobName: "service-marathon", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + MarathonSDConfigs: []*marathon.SDConfig{ + { + Servers: []string{ + "https://marathon.example.com:443", + }, + RefreshInterval: model.Duration(30 * time.Second), + AuthToken: config_util.Secret("mysecret"), + HTTPClientConfig: config_util.HTTPClientConfig{ + TLSConfig: config_util.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + }, + }, + }, + }, + }, + }, + { + JobName: "service-ec2", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + EC2SDConfigs: 
[]*ec2.SDConfig{ + { + Region: "us-east-1", + AccessKey: "access", + SecretKey: "mysecret", + Profile: "profile", + RefreshInterval: model.Duration(60 * time.Second), + Port: 80, + Filters: []*ec2.Filter{ + { + Name: "tag:environment", + Values: []string{"prod"}, + }, + { + Name: "tag:service", + Values: []string{"web", "db"}, + }, + }, + }, + }, + }, + }, + { + JobName: "service-azure", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + AzureSDConfigs: []*azure.SDConfig{ + { + SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11", + TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2", + ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C", + ClientSecret: "mysecret", + RefreshInterval: model.Duration(5 * time.Minute), + Port: 9100, + }, + }, + }, + }, + { + JobName: "service-nerve", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + NerveSDConfigs: []*zookeeper.NerveSDConfig{ + { + Servers: []string{"localhost"}, + Paths: []string{"/monitoring"}, + Timeout: model.Duration(10 * time.Second), + }, + }, + }, + }, + { + JobName: "0123service-xxx", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + StaticConfigs: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + }, + Source: "0", + }, + }, + }, + }, + { + JobName: "測試", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: 
DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + StaticConfigs: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + }, + Source: "0", + }, + }, + }, + }, + { + JobName: "service-triton", + + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + TritonSDConfigs: []*triton.SDConfig{ + { + + Account: "testAccount", + DNSSuffix: "triton.example.com", + Endpoint: "triton.example.com", + Port: 9163, + RefreshInterval: model.Duration(60 * time.Second), + Version: 1, + TLSConfig: config_util.TLSConfig{ + CertFile: "testdata/valid_cert_file", + KeyFile: "testdata/valid_key_file", + }, + }, + }, + }, + }, + }, + AlertingConfig: AlertingConfig{ + AlertmanagerConfigs: []*AlertmanagerConfig{ + { + Scheme: "https", + Timeout: 10 * time.Second, + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + StaticConfigs: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "1.2.3.4:9093"}, + {model.AddressLabel: "1.2.3.5:9093"}, + {model.AddressLabel: "1.2.3.6:9093"}, + }, + Source: "0", + }, + }, + }, + }, + }, + }, + original: "", +} + +func TestLoadConfig(t *testing.T) { + // Parse a valid file that sets a global scrape timeout. This tests whether parsing + // an overwritten default field in the global config permanently changes the default. + _, err := LoadFile("testdata/global_timeout.good.yml") + testutil.Ok(t, err) + + c, err := LoadFile("testdata/conf.good.yml") + testutil.Ok(t, err) + + expectedConf.original = c.original + testutil.Equals(t, expectedConf, c) +} + +// YAML marshalling must not reveal authentication credentials. 
+func TestElideSecrets(t *testing.T) { + c, err := LoadFile("testdata/conf.good.yml") + testutil.Ok(t, err) + + secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`) + + config, err := yaml.Marshal(c) + testutil.Ok(t, err) + yamlConfig := string(config) + + matches := secretRe.FindAllStringIndex(yamlConfig, -1) + testutil.Assert(t, len(matches) == 7, "wrong number of secret matches found") + testutil.Assert(t, !strings.Contains(yamlConfig, "mysecret"), + "yaml marshal reveals authentication credentials.") +} + +func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { + // Parse a valid file that sets a rule files with an absolute path + c, err := LoadFile(ruleFilesConfigFile) + testutil.Ok(t, err) + + ruleFilesExpectedConf.original = c.original + testutil.Equals(t, ruleFilesExpectedConf, c) +} + +var expectedErrors = []struct { + filename string + errMsg string +}{ + { + filename: "jobname.bad.yml", + errMsg: `job_name is empty`, + }, { + filename: "jobname_dup.bad.yml", + errMsg: `found multiple scrape configs with job name "prometheus"`, + }, { + filename: "scrape_interval.bad.yml", + errMsg: `scrape timeout greater than scrape interval`, + }, { + filename: "labelname.bad.yml", + errMsg: `"not$allowed" is not a valid label name`, + }, { + filename: "labelname2.bad.yml", + errMsg: `"not:allowed" is not a valid label name`, + }, { + filename: "regex.bad.yml", + errMsg: "error parsing regexp", + }, { + filename: "modulus_missing.bad.yml", + errMsg: "relabel configuration for hashmod requires non-zero modulus", + }, { + filename: "labelkeep.bad.yml", + errMsg: "labelkeep action requires only 'regex', and no other fields", + }, { + filename: "labelkeep2.bad.yml", + errMsg: "labelkeep action requires only 'regex', and no other fields", + }, { + filename: "labelkeep3.bad.yml", + errMsg: "labelkeep action requires only 'regex', and no other fields", + }, { + filename: "labelkeep4.bad.yml", + errMsg: "labelkeep action requires only 'regex', and no other fields", + }, { 
+ filename: "labelkeep5.bad.yml", + errMsg: "labelkeep action requires only 'regex', and no other fields", + }, { + filename: "labeldrop.bad.yml", + errMsg: "labeldrop action requires only 'regex', and no other fields", + }, { + filename: "labeldrop2.bad.yml", + errMsg: "labeldrop action requires only 'regex', and no other fields", + }, { + filename: "labeldrop3.bad.yml", + errMsg: "labeldrop action requires only 'regex', and no other fields", + }, { + filename: "labeldrop4.bad.yml", + errMsg: "labeldrop action requires only 'regex', and no other fields", + }, { + filename: "labeldrop5.bad.yml", + errMsg: "labeldrop action requires only 'regex', and no other fields", + }, { + filename: "labelmap.bad.yml", + errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action", + }, { + filename: "rules.bad.yml", + errMsg: "invalid rule file path", + }, { + filename: "unknown_attr.bad.yml", + errMsg: "field consult_sd_configs not found in type config.plain", + }, { + filename: "bearertoken.bad.yml", + errMsg: "at most one of bearer_token & bearer_token_file must be configured", + }, { + filename: "bearertoken_basicauth.bad.yml", + errMsg: "at most one of basic_auth, bearer_token & bearer_token_file must be configured", + }, { + filename: "kubernetes_bearertoken.bad.yml", + errMsg: "at most one of bearer_token & bearer_token_file must be configured", + }, { + filename: "kubernetes_role.bad.yml", + errMsg: "role", + }, { + filename: "kubernetes_namespace_discovery.bad.yml", + errMsg: "field foo not found in type kubernetes.plain", + }, { + filename: "kubernetes_bearertoken_basicauth.bad.yml", + errMsg: "at most one of basic_auth, bearer_token & bearer_token_file must be configured", + }, { + filename: "marathon_no_servers.bad.yml", + errMsg: "marathon_sd: must contain at least one Marathon server", + }, { + filename: "marathon_authtoken_authtokenfile.bad.yml", + errMsg: "marathon_sd: at most one of auth_token & auth_token_file must be configured", + }, { + filename: 
"marathon_authtoken_basicauth.bad.yml", + errMsg: "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured", + }, { + filename: "marathon_authtoken_bearertoken.bad.yml", + errMsg: "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured", + }, { + filename: "url_in_targetgroup.bad.yml", + errMsg: "\"http://bad\" is not a valid hostname", + }, { + filename: "target_label_missing.bad.yml", + errMsg: "relabel configuration for replace action requires 'target_label' value", + }, { + filename: "target_label_hashmod_missing.bad.yml", + errMsg: "relabel configuration for hashmod action requires 'target_label' value", + }, { + filename: "unknown_global_attr.bad.yml", + errMsg: "field nonexistent_field not found in type config.plain", + }, { + filename: "remote_read_url_missing.bad.yml", + errMsg: `url for remote_read is empty`, + }, { + filename: "remote_write_url_missing.bad.yml", + errMsg: `url for remote_write is empty`, + }, + { + filename: "ec2_filters_empty_values.bad.yml", + errMsg: `EC2 SD configuration filter values cannot be empty`, + }, + { + filename: "section_key_dup.bad.yml", + errMsg: "field scrape_configs already set in type config.plain", + }, +} + +func TestBadConfigs(t *testing.T) { + for _, ee := range expectedErrors { + _, err := LoadFile("testdata/" + ee.filename) + testutil.NotOk(t, err, "%s", ee.filename) + testutil.Assert(t, strings.Contains(err.Error(), ee.errMsg), + "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) + } +} + +func TestBadStaticConfigsJSON(t *testing.T) { + content, err := ioutil.ReadFile("testdata/static_config.bad.json") + testutil.Ok(t, err) + var tg targetgroup.Group + err = json.Unmarshal(content, &tg) + testutil.NotOk(t, err, "") +} + +func TestBadStaticConfigsYML(t *testing.T) { + content, err := ioutil.ReadFile("testdata/static_config.bad.yml") + testutil.Ok(t, err) + var tg targetgroup.Group + err = 
yaml.UnmarshalStrict(content, &tg) + testutil.NotOk(t, err, "") +} + +func TestEmptyConfig(t *testing.T) { + c, err := Load("") + testutil.Ok(t, err) + exp := DefaultConfig + testutil.Equals(t, exp, *c) +} + +func TestEmptyGlobalBlock(t *testing.T) { + c, err := Load("global:\n") + testutil.Ok(t, err) + exp := DefaultConfig + exp.original = "global:\n" + testutil.Equals(t, exp, *c) +} + +func TestTargetLabelValidity(t *testing.T) { + tests := []struct { + str string + valid bool + }{ + {"-label", false}, + {"label", true}, + {"label${1}", true}, + {"${1}label", true}, + {"${1}", true}, + {"${1}label", true}, + {"${", false}, + {"$", false}, + {"${}", false}, + {"foo${", false}, + {"$1", true}, + {"asd$2asd", true}, + {"-foo${1}bar-", false}, + {"_${1}_", true}, + {"foo${bar}foo", true}, + } + for _, test := range tests { + testutil.Assert(t, relabelTarget.Match([]byte(test.str)) == test.valid, + "Expected %q to be %v", test.str, test.valid) + } +} + +func kubernetesSDHostURL() config_util.URL { + tURL, _ := url.Parse("https://localhost:1234") + return config_util.URL{URL: tURL} +} diff --git a/src/prometheus/config/config_windows_test.go b/src/prometheus/config/config_windows_test.go new file mode 100644 index 0000000..ac97d31 --- /dev/null +++ b/src/prometheus/config/config_windows_test.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml" + +var ruleFilesExpectedConf = &Config{ + GlobalConfig: DefaultGlobalConfig, + RuleFiles: []string{ + "testdata\\first.rules", + "testdata\\rules\\second.rules", + "c:\\absolute\\third.rules", + }, + original: "", +} diff --git a/src/prometheus/config/testdata/bearertoken.bad.yml b/src/prometheus/config/testdata/bearertoken.bad.yml new file mode 100644 index 0000000..58efc23 --- /dev/null +++ b/src/prometheus/config/testdata/bearertoken.bad.yml @@ -0,0 +1,6 @@ +scrape_configs: + - job_name: prometheus + + bearer_token: 1234 + bearer_token_file: somefile + diff --git a/src/prometheus/config/testdata/bearertoken_basicauth.bad.yml b/src/prometheus/config/testdata/bearertoken_basicauth.bad.yml new file mode 100644 index 0000000..2584f7f --- /dev/null +++ b/src/prometheus/config/testdata/bearertoken_basicauth.bad.yml @@ -0,0 +1,8 @@ +scrape_configs: + - job_name: prometheus + + bearer_token: 1234 + basic_auth: + username: user + password: password + diff --git a/src/prometheus/config/testdata/conf.good.yml b/src/prometheus/config/testdata/conf.good.yml new file mode 100644 index 0000000..df57895 --- /dev/null +++ b/src/prometheus/config/testdata/conf.good.yml @@ -0,0 +1,242 @@ +# my global config +global: + scrape_interval: 15s + evaluation_interval: 30s + # scrape_timeout is set to the global default (10s). + + external_labels: + monitor: codelab + foo: bar + +rule_files: +- "first.rules" +- "my/*.rules" + +remote_write: + - url: http://remote1/push + write_relabel_configs: + - source_labels: [__name__] + regex: expensive.* + action: drop + - url: http://remote2/push + +remote_read: + - url: http://remote1/read + read_recent: true + - url: http://remote3/read + read_recent: false + required_matchers: + job: special + +scrape_configs: +- job_name: prometheus + + honor_labels: true + # scrape_interval is defined by the configured global (15s). 
+ # scrape_timeout is defined by the global default (10s). + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + file_sd_configs: + - files: + - foo/*.slow.json + - foo/*.slow.yml + - single/file.yml + refresh_interval: 10m + - files: + - bar/*.yaml + + static_configs: + - targets: ['localhost:9090', 'localhost:9191'] + labels: + my: label + your: label + + relabel_configs: + - source_labels: [job, __meta_dns_name] + regex: (.*)some-[regex] + target_label: job + replacement: foo-${1} + # action defaults to 'replace' + - source_labels: [abc] + target_label: cde + - replacement: static + target_label: abc + - regex: + replacement: static + target_label: abc + + bearer_token_file: valid_token_file + + +- job_name: service-x + + basic_auth: + username: admin_name + password: "multiline\nmysecret\ntest" + + scrape_interval: 50s + scrape_timeout: 5s + + sample_limit: 1000 + + metrics_path: /my_path + scheme: https + + dns_sd_configs: + - refresh_interval: 15s + names: + - first.dns.address.domain.com + - second.dns.address.domain.com + - names: + - first.dns.address.domain.com + # refresh_interval defaults to 30s. 
+ + relabel_configs: + - source_labels: [job] + regex: (.*)some-[regex] + action: drop + - source_labels: [__address__] + modulus: 8 + target_label: __tmp_hash + action: hashmod + - source_labels: [__tmp_hash] + regex: 1 + action: keep + - action: labelmap + regex: 1 + - action: labeldrop + regex: d + - action: labelkeep + regex: k + + metric_relabel_configs: + - source_labels: [__name__] + regex: expensive_metric.* + action: drop + +- job_name: service-y + + consul_sd_configs: + - server: 'localhost:1234' + token: mysecret + services: ['nginx', 'cache', 'mysql'] + tag: "canary" + node_meta: + rack: "123" + allow_stale: true + scheme: https + tls_config: + ca_file: valid_ca_file + cert_file: valid_cert_file + key_file: valid_key_file + insecure_skip_verify: false + + relabel_configs: + - source_labels: [__meta_sd_consul_tags] + separator: ',' + regex: label:([^=]+)=([^,]+) + target_label: ${1} + replacement: ${2} + +- job_name: service-z + + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file + + bearer_token: mysecret + +- job_name: service-kubernetes + + kubernetes_sd_configs: + - role: endpoints + api_server: 'https://localhost:1234' + + basic_auth: + username: 'myusername' + password: 'mysecret' + +- job_name: service-kubernetes-namespaces + + kubernetes_sd_configs: + - role: endpoints + api_server: 'https://localhost:1234' + namespaces: + names: + - default + +- job_name: service-marathon + marathon_sd_configs: + - servers: + - 'https://marathon.example.com:443' + + auth_token: "mysecret" + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file + +- job_name: service-ec2 + ec2_sd_configs: + - region: us-east-1 + access_key: access + secret_key: mysecret + profile: profile + filters: + - name: tag:environment + values: + - prod + + - name: tag:service + values: + - web + - db + +- job_name: service-azure + azure_sd_configs: + - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 + tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 + 
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C + client_secret: mysecret + port: 9100 + +- job_name: service-nerve + nerve_sd_configs: + - servers: + - localhost + paths: + - /monitoring + +- job_name: 0123service-xxx + metrics_path: /metrics + static_configs: + - targets: + - localhost:9090 + +- job_name: 測試 + metrics_path: /metrics + static_configs: + - targets: + - localhost:9090 + +- job_name: service-triton + triton_sd_configs: + - account: 'testAccount' + dns_suffix: 'triton.example.com' + endpoint: 'triton.example.com' + port: 9163 + refresh_interval: 1m + version: 1 + tls_config: + cert_file: testdata/valid_cert_file + key_file: testdata/valid_key_file + +alerting: + alertmanagers: + - scheme: https + static_configs: + - targets: + - "1.2.3.4:9093" + - "1.2.3.5:9093" + - "1.2.3.6:9093" diff --git a/src/prometheus/config/testdata/ec2_filters_empty_values.bad.yml b/src/prometheus/config/testdata/ec2_filters_empty_values.bad.yml new file mode 100644 index 0000000..f375bf5 --- /dev/null +++ b/src/prometheus/config/testdata/ec2_filters_empty_values.bad.yml @@ -0,0 +1,9 @@ +scrape_configs: + - job_name: prometheus + + ec2_sd_configs: + - region: 'us-east-1' + filters: + - name: 'tag:environment' + values: + diff --git a/src/prometheus/config/testdata/first.rules b/src/prometheus/config/testdata/first.rules new file mode 100644 index 0000000..96a4c78 --- /dev/null +++ b/src/prometheus/config/testdata/first.rules @@ -0,0 +1,10 @@ +groups: +- name: my-group-name + rules: + - alert: InstanceDown + expr: up == 0 + for: 1m + labels: + severity: critical + annotations: + description: "stuff's happening with {{ $labels.service }}" diff --git a/src/prometheus/config/testdata/global_timeout.good.yml b/src/prometheus/config/testdata/global_timeout.good.yml new file mode 100644 index 0000000..b1c2054 --- /dev/null +++ b/src/prometheus/config/testdata/global_timeout.good.yml @@ -0,0 +1,3 @@ +global: + scrape_timeout: 1h + scrape_interval: 1h diff --git 
a/src/prometheus/config/testdata/jobname.bad.yml b/src/prometheus/config/testdata/jobname.bad.yml new file mode 100644 index 0000000..3c3099c --- /dev/null +++ b/src/prometheus/config/testdata/jobname.bad.yml @@ -0,0 +1,2 @@ +scrape_configs: + - job_name: diff --git a/src/prometheus/config/testdata/jobname_dup.bad.yml b/src/prometheus/config/testdata/jobname_dup.bad.yml new file mode 100644 index 0000000..0265493 --- /dev/null +++ b/src/prometheus/config/testdata/jobname_dup.bad.yml @@ -0,0 +1,5 @@ +# Two scrape configs with the same job names are not allowed. +scrape_configs: + - job_name: prometheus + - job_name: service-x + - job_name: prometheus diff --git a/src/prometheus/config/testdata/kubernetes_bearertoken.bad.yml b/src/prometheus/config/testdata/kubernetes_bearertoken.bad.yml new file mode 100644 index 0000000..158de9a --- /dev/null +++ b/src/prometheus/config/testdata/kubernetes_bearertoken.bad.yml @@ -0,0 +1,10 @@ +scrape_configs: + - job_name: prometheus + + kubernetes_sd_configs: + - role: node + api_server: 'https://localhost:1234' + + bearer_token: 1234 + bearer_token_file: somefile + diff --git a/src/prometheus/config/testdata/kubernetes_bearertoken_basicauth.bad.yml b/src/prometheus/config/testdata/kubernetes_bearertoken_basicauth.bad.yml new file mode 100644 index 0000000..ad7cc32 --- /dev/null +++ b/src/prometheus/config/testdata/kubernetes_bearertoken_basicauth.bad.yml @@ -0,0 +1,12 @@ +scrape_configs: + - job_name: prometheus + + kubernetes_sd_configs: + - role: pod + api_server: 'https://localhost:1234' + + bearer_token: 1234 + basic_auth: + username: user + password: password + diff --git a/src/prometheus/config/testdata/kubernetes_namespace_discovery.bad.yml b/src/prometheus/config/testdata/kubernetes_namespace_discovery.bad.yml new file mode 100644 index 0000000..c98d65d --- /dev/null +++ b/src/prometheus/config/testdata/kubernetes_namespace_discovery.bad.yml @@ -0,0 +1,6 @@ +scrape_configs: +- kubernetes_sd_configs: + - api_server: 
kubernetes:443 + role: endpoints + namespaces: + foo: bar diff --git a/src/prometheus/config/testdata/kubernetes_role.bad.yml b/src/prometheus/config/testdata/kubernetes_role.bad.yml new file mode 100644 index 0000000..ae924d8 --- /dev/null +++ b/src/prometheus/config/testdata/kubernetes_role.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: +- kubernetes_sd_configs: + - api_server: kubernetes:443 + role: vacation + diff --git a/src/prometheus/config/testdata/labeldrop.bad.yml b/src/prometheus/config/testdata/labeldrop.bad.yml new file mode 100644 index 0000000..b71c1e8 --- /dev/null +++ b/src/prometheus/config/testdata/labeldrop.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [abcdef] + action: labeldrop diff --git a/src/prometheus/config/testdata/labeldrop2.bad.yml b/src/prometheus/config/testdata/labeldrop2.bad.yml new file mode 100644 index 0000000..f703169 --- /dev/null +++ b/src/prometheus/config/testdata/labeldrop2.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - modulus: 8 + action: labeldrop diff --git a/src/prometheus/config/testdata/labeldrop3.bad.yml b/src/prometheus/config/testdata/labeldrop3.bad.yml new file mode 100644 index 0000000..5bed5d0 --- /dev/null +++ b/src/prometheus/config/testdata/labeldrop3.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - separator: ',' + action: labeldrop diff --git a/src/prometheus/config/testdata/labeldrop4.bad.yml b/src/prometheus/config/testdata/labeldrop4.bad.yml new file mode 100644 index 0000000..52877d2 --- /dev/null +++ b/src/prometheus/config/testdata/labeldrop4.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - replacement: yolo-{1} + action: labeldrop diff --git a/src/prometheus/config/testdata/labeldrop5.bad.yml b/src/prometheus/config/testdata/labeldrop5.bad.yml new file mode 100644 index 0000000..36f2827 --- /dev/null +++ 
b/src/prometheus/config/testdata/labeldrop5.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - target_label: yolo + action: labeldrop diff --git a/src/prometheus/config/testdata/labelkeep.bad.yml b/src/prometheus/config/testdata/labelkeep.bad.yml new file mode 100644 index 0000000..709da05 --- /dev/null +++ b/src/prometheus/config/testdata/labelkeep.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [abcdef] + action: labelkeep diff --git a/src/prometheus/config/testdata/labelkeep2.bad.yml b/src/prometheus/config/testdata/labelkeep2.bad.yml new file mode 100644 index 0000000..734e537 --- /dev/null +++ b/src/prometheus/config/testdata/labelkeep2.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - modulus: 8 + action: labelkeep diff --git a/src/prometheus/config/testdata/labelkeep3.bad.yml b/src/prometheus/config/testdata/labelkeep3.bad.yml new file mode 100644 index 0000000..407a0f7 --- /dev/null +++ b/src/prometheus/config/testdata/labelkeep3.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - separator: ',' + action: labelkeep diff --git a/src/prometheus/config/testdata/labelkeep4.bad.yml b/src/prometheus/config/testdata/labelkeep4.bad.yml new file mode 100644 index 0000000..4e77994 --- /dev/null +++ b/src/prometheus/config/testdata/labelkeep4.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - replacement: yolo-{1} + action: labelkeep diff --git a/src/prometheus/config/testdata/labelkeep5.bad.yml b/src/prometheus/config/testdata/labelkeep5.bad.yml new file mode 100644 index 0000000..689399f --- /dev/null +++ b/src/prometheus/config/testdata/labelkeep5.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - target_label: yolo + action: labelkeep diff --git a/src/prometheus/config/testdata/labelmap.bad.yml 
b/src/prometheus/config/testdata/labelmap.bad.yml new file mode 100644 index 0000000..29d2653 --- /dev/null +++ b/src/prometheus/config/testdata/labelmap.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - action: labelmap + replacement: l-$1 diff --git a/src/prometheus/config/testdata/labelname.bad.yml b/src/prometheus/config/testdata/labelname.bad.yml new file mode 100644 index 0000000..c06853a --- /dev/null +++ b/src/prometheus/config/testdata/labelname.bad.yml @@ -0,0 +1,3 @@ +global: + external_labels: + not$allowed: value diff --git a/src/prometheus/config/testdata/labelname2.bad.yml b/src/prometheus/config/testdata/labelname2.bad.yml new file mode 100644 index 0000000..7afcd6b --- /dev/null +++ b/src/prometheus/config/testdata/labelname2.bad.yml @@ -0,0 +1,3 @@ +global: + external_labels: + 'not:allowed': value diff --git a/src/prometheus/config/testdata/marathon_authtoken_authtokenfile.bad.yml b/src/prometheus/config/testdata/marathon_authtoken_authtokenfile.bad.yml new file mode 100644 index 0000000..b31c6f1 --- /dev/null +++ b/src/prometheus/config/testdata/marathon_authtoken_authtokenfile.bad.yml @@ -0,0 +1,9 @@ +scrape_configs: + - job_name: prometheus + + marathon_sd_configs: + - servers: + - 'https://localhost:1234' + + auth_token: 1234 + auth_token_file: somefile diff --git a/src/prometheus/config/testdata/marathon_authtoken_basicauth.bad.yml b/src/prometheus/config/testdata/marathon_authtoken_basicauth.bad.yml new file mode 100644 index 0000000..64300f4 --- /dev/null +++ b/src/prometheus/config/testdata/marathon_authtoken_basicauth.bad.yml @@ -0,0 +1,11 @@ +scrape_configs: + - job_name: prometheus + + marathon_sd_configs: + - servers: + - 'https://localhost:1234' + + auth_token: 1234 + basic_auth: + username: user + password: password diff --git a/src/prometheus/config/testdata/marathon_authtoken_bearertoken.bad.yml b/src/prometheus/config/testdata/marathon_authtoken_bearertoken.bad.yml new file mode 100644 index 
0000000..36eeb80 --- /dev/null +++ b/src/prometheus/config/testdata/marathon_authtoken_bearertoken.bad.yml @@ -0,0 +1,9 @@ +scrape_configs: + - job_name: prometheus + + marathon_sd_configs: + - servers: + - 'https://localhost:1234' + + auth_token: 1234 + bearer_token: 4567 diff --git a/src/prometheus/config/testdata/marathon_no_servers.bad.yml b/src/prometheus/config/testdata/marathon_no_servers.bad.yml new file mode 100644 index 0000000..df245e9 --- /dev/null +++ b/src/prometheus/config/testdata/marathon_no_servers.bad.yml @@ -0,0 +1,10 @@ +# my global config +global: + scrape_interval: 15s + evaluation_interval: 30s + +scrape_configs: + +- job_name: service-marathon + marathon_sd_configs: + - servers: diff --git a/src/prometheus/config/testdata/modulus_missing.bad.yml b/src/prometheus/config/testdata/modulus_missing.bad.yml new file mode 100644 index 0000000..864a57c --- /dev/null +++ b/src/prometheus/config/testdata/modulus_missing.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - regex: abcdef + action: hashmod diff --git a/src/prometheus/config/testdata/regex.bad.yml b/src/prometheus/config/testdata/regex.bad.yml new file mode 100644 index 0000000..4cfe792 --- /dev/null +++ b/src/prometheus/config/testdata/regex.bad.yml @@ -0,0 +1,4 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - regex: abc(def diff --git a/src/prometheus/config/testdata/remote_read_url_missing.bad.yml b/src/prometheus/config/testdata/remote_read_url_missing.bad.yml new file mode 100644 index 0000000..524c1c6 --- /dev/null +++ b/src/prometheus/config/testdata/remote_read_url_missing.bad.yml @@ -0,0 +1,2 @@ +remote_read: + - url: diff --git a/src/prometheus/config/testdata/remote_write_url_missing.bad.yml b/src/prometheus/config/testdata/remote_write_url_missing.bad.yml new file mode 100644 index 0000000..5078019 --- /dev/null +++ b/src/prometheus/config/testdata/remote_write_url_missing.bad.yml @@ -0,0 +1,2 @@ +remote_write: + - url: 
diff --git a/src/prometheus/config/testdata/rules.bad.yml b/src/prometheus/config/testdata/rules.bad.yml new file mode 100644 index 0000000..b812401 --- /dev/null +++ b/src/prometheus/config/testdata/rules.bad.yml @@ -0,0 +1,3 @@ +rule_files: + - 'my_rule' # fine + - 'my/*/rule' # bad diff --git a/src/prometheus/config/testdata/rules_abs_path.good.yml b/src/prometheus/config/testdata/rules_abs_path.good.yml new file mode 100644 index 0000000..e9d3cf7 --- /dev/null +++ b/src/prometheus/config/testdata/rules_abs_path.good.yml @@ -0,0 +1,4 @@ +rule_files: + - 'first.rules' + - 'rules/second.rules' + - '/absolute/third.rules' diff --git a/src/prometheus/config/testdata/rules_abs_path_windows.good.yml b/src/prometheus/config/testdata/rules_abs_path_windows.good.yml new file mode 100644 index 0000000..667411f --- /dev/null +++ b/src/prometheus/config/testdata/rules_abs_path_windows.good.yml @@ -0,0 +1,4 @@ +rule_files: + - 'first.rules' + - 'rules\second.rules' + - 'c:\absolute\third.rules' diff --git a/src/prometheus/config/testdata/scrape_interval.bad.yml b/src/prometheus/config/testdata/scrape_interval.bad.yml new file mode 100644 index 0000000..b334c77 --- /dev/null +++ b/src/prometheus/config/testdata/scrape_interval.bad.yml @@ -0,0 +1,4 @@ +scrape_configs: +- job_name: prometheus + scrape_interval: 5s + scrape_timeout: 6s diff --git a/src/prometheus/config/testdata/section_key_dup.bad.yml b/src/prometheus/config/testdata/section_key_dup.bad.yml new file mode 100644 index 0000000..35e96aa --- /dev/null +++ b/src/prometheus/config/testdata/section_key_dup.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: 'prometheus_system' + +scrape_configs: + - job_name: 'foo_system' diff --git a/src/prometheus/config/testdata/static_config.bad.json b/src/prometheus/config/testdata/static_config.bad.json new file mode 100644 index 0000000..6050ed9 --- /dev/null +++ b/src/prometheus/config/testdata/static_config.bad.json @@ -0,0 +1,7 @@ +{ + "targets": ["1.2.3.4:9100"], + 
"labels": { + "some_valid_label": "foo", + "oops:this-label-is-invalid": "bar" + } +} diff --git a/src/prometheus/config/testdata/static_config.bad.yml b/src/prometheus/config/testdata/static_config.bad.yml new file mode 100644 index 0000000..1d229ec --- /dev/null +++ b/src/prometheus/config/testdata/static_config.bad.yml @@ -0,0 +1,4 @@ +targets: ['1.2.3.4:9001', '1.2.3.5:9090'] +labels: + valid_label: foo + not:valid_label: bar diff --git a/src/prometheus/config/testdata/target_label_hashmod_missing.bad.yml b/src/prometheus/config/testdata/target_label_hashmod_missing.bad.yml new file mode 100644 index 0000000..c919ac7 --- /dev/null +++ b/src/prometheus/config/testdata/target_label_hashmod_missing.bad.yml @@ -0,0 +1,6 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [__address__] + modulus: 8 + action: hashmod diff --git a/src/prometheus/config/testdata/target_label_missing.bad.yml b/src/prometheus/config/testdata/target_label_missing.bad.yml new file mode 100644 index 0000000..d7e8de3 --- /dev/null +++ b/src/prometheus/config/testdata/target_label_missing.bad.yml @@ -0,0 +1,4 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - regex: abcdef diff --git a/src/prometheus/config/testdata/unknown_attr.bad.yml b/src/prometheus/config/testdata/unknown_attr.bad.yml new file mode 100644 index 0000000..8a53075 --- /dev/null +++ b/src/prometheus/config/testdata/unknown_attr.bad.yml @@ -0,0 +1,20 @@ +# my global config +global: + scrape_interval: 15s + evaluation_interval: 30s + # scrape_timeout is set to the global default (10s). 
+ + external_labels: + monitor: codelab + foo: bar + +rule_files: + - "first.rules" + - "second.rules" + - "my/*.rules" + +scrape_configs: + - job_name: prometheus + + consult_sd_configs: + - server: 'localhost:1234' diff --git a/src/prometheus/config/testdata/unknown_global_attr.bad.yml b/src/prometheus/config/testdata/unknown_global_attr.bad.yml new file mode 100644 index 0000000..169391f --- /dev/null +++ b/src/prometheus/config/testdata/unknown_global_attr.bad.yml @@ -0,0 +1,2 @@ +global: + nonexistent_field: test diff --git a/src/prometheus/config/testdata/url_in_targetgroup.bad.yml b/src/prometheus/config/testdata/url_in_targetgroup.bad.yml new file mode 100644 index 0000000..a57d757 --- /dev/null +++ b/src/prometheus/config/testdata/url_in_targetgroup.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: +- job_name: prometheus + static_configs: + - targets: + - http://bad diff --git a/src/prometheus/console_libraries/menu.lib b/src/prometheus/console_libraries/menu.lib new file mode 100644 index 0000000..929dc36 --- /dev/null +++ b/src/prometheus/console_libraries/menu.lib @@ -0,0 +1,76 @@ +{{/* vim: set ft=html: */}} + +{{/* Navbar, should be passed . */}} +{{ define "navbar" }} + +{{ end }} + +{{/* LHS menu, should be passed . */}} +{{ define "menu" }} +
+
    +{{ template "_menuItem" (args . "index.html.example" "Overview") }} + +{{ if query "up{job='node'}" }} +{{ template "_menuItem" (args . "node.html" "Node") }} +{{ if match "^node" .Path }} + {{ if .Params.instance }} + + {{ end }} +{{ end }} +{{ end }} + +{{ if query "up{job='prometheus'}" }} +{{ template "_menuItem" (args . "prometheus.html" "Prometheus") }} +{{ if match "^prometheus" .Path }} + {{ if .Params.instance }} + + {{ end }} +{{ end }} +{{ end }} + +
+
+{{ end }} + +{{/* Helper, pass (args . path name) */}} +{{ define "_menuItem" }} +
  • {{ .arg2 }}
  • +{{ end }} + diff --git a/src/prometheus/console_libraries/prom.lib b/src/prometheus/console_libraries/prom.lib new file mode 100644 index 0000000..ff20907 --- /dev/null +++ b/src/prometheus/console_libraries/prom.lib @@ -0,0 +1,135 @@ +{{/* vim: set ft=html: */}} +{{/* Load Prometheus console library JS/CSS. Should go in */}} +{{ define "prom_console_head" }} + + + + + + + + + + + +{{ end }} + +{{/* Top of all pages. */}} +{{ define "head" }} + + +{{ template "prom_console_head" }} + + +{{ template "navbar" . }} +{{ template "menu" . }} +{{ end }} + +{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }} +{{ define "humanize" }}{{ humanize . }}{{ end }} +{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }} +{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }} +{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }} +{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }} +{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }} +{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }} + +{{/* prom_query_drilldown (args expr suffix? renderTemplate?) +Displays the result of the expression, with a link to /graph for it. + +renderTemplate is the name of the template to use to render the value. +*/}} +{{ define "prom_query_drilldown" }} +{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }} +{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }} +{{ end }} + +{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&{{ end }}{{ end }}" + +{{ define "prom_right_table_head" }} +
    + +{{ end }} +{{ define "prom_right_table_tail" }} +
    +
    +{{ end }} + +{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}} +{{ define "prom_right_table_job_head" }} + + {{ . }} + {{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }} + + + CPU + {{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }} + + + Memory + {{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }} + +{{ end }} + + +{{ define "prom_content_head" }} +
    +
    +{{ template "prom_graph_timecontrol" . }} +{{ end }} +{{ define "prom_content_tail" }} +
    +
    +{{ end }} + +{{ define "prom_graph_timecontrol" }} +
    +
    +
    + + + +
    + +
    + + + +
    + +
    +
    + + + +
    +
    +
    + +
    +{{ end }} + +{{/* Bottom of all pages. */}} +{{ define "tail" }} + + +{{ end }} diff --git a/src/prometheus/consoles/index.html.example b/src/prometheus/consoles/index.html.example new file mode 100644 index 0000000..e20e009 --- /dev/null +++ b/src/prometheus/consoles/index.html.example @@ -0,0 +1,28 @@ +{{ template "head" . }} + +{{ template "prom_right_table_head" }} +{{ template "prom_right_table_tail" }} + +{{ template "prom_content_head" . }} +

    Overview

    +

    These are example consoles for Prometheus.

    + +

    These consoles expect exporters to have the following job labels:

    + + + + + + + + + + + + + +
    ExporterJob label
    Node Exporternode
    Prometheusprometheus
    + +{{ template "prom_content_tail" . }} + +{{ template "tail" }} diff --git a/src/prometheus/consoles/node-cpu.html b/src/prometheus/consoles/node-cpu.html new file mode 100644 index 0000000..50c46a2 --- /dev/null +++ b/src/prometheus/consoles/node-cpu.html @@ -0,0 +1,60 @@ +{{ template "head" . }} + +{{ template "prom_right_table_head" }} + + CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu{job='node',instance='%s'})))" .Params.instance)) }} + +{{ range printf "sum by (mode)(irate(node_cpu{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }} + + {{ .Labels.mode | title }} CPU + {{ .Value | printf "%.1f" }}% + +{{ end }} + Misc + + Processes Running + {{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }} + + + Processes Blocked + {{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }} + + + Forks + {{ template "prom_query_drilldown" (args (printf "irate(node_forks{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} + + + Context Switches + {{ template "prom_query_drilldown" (args (printf "irate(node_context_switches{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} + + + Interrupts + {{ template "prom_query_drilldown" (args (printf "irate(node_intr{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }} + + + 1m Loadavg + {{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }} + + + +{{ template "prom_right_table_tail" }} + +{{ template "prom_content_head" . }} +

    Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

    + +

    CPU Usage

    +
    + +{{ template "prom_content_tail" . }} + +{{ template "tail" }} diff --git a/src/prometheus/consoles/node-disk.html b/src/prometheus/consoles/node-disk.html new file mode 100644 index 0000000..2ef7810 --- /dev/null +++ b/src/prometheus/consoles/node-disk.html @@ -0,0 +1,77 @@ +{{ template "head" . }} + +{{ template "prom_content_head" . }} +

    Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

    + +

    Disk I/O Utilization

    +
    + +

    Filesystem Usage

    +
    + + +{{ template "prom_right_table_head" }} + Disks + +{{ range printf "node_disk_io_time_ms{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }} + {{ .Labels.device }} + + Utilization + {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} + + + Throughput + {{ template "prom_query_drilldown" (args (printf "irate(node_disk_sectors_read{job='node',instance='%s',device='%s'}[5m]) * 512 + irate(node_disk_sectors_written{job='node',instance='%s',device='%s'}[5m]) * 512" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} + + + Avg Read Time + {{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 / irate(node_disk_reads_completed{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} + + + Avg Write Time + {{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 / irate(node_disk_writes_completed{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }} + +{{ end }} + Filesystem Fullness + +{{ define "roughlyNearZero" }} +{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} +{{ end }} +{{ range printf "node_filesystem_size{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} + + {{ .Labels.mountpoint }} + {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_free{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} + +{{ end }} + + +{{ template "prom_right_table_tail" }} + +{{ template "prom_content_tail" . 
}} + +{{ template "tail" }} diff --git a/src/prometheus/consoles/node-overview.html b/src/prometheus/consoles/node-overview.html new file mode 100644 index 0000000..ba2c676 --- /dev/null +++ b/src/prometheus/consoles/node-overview.html @@ -0,0 +1,122 @@ +{{ template "head" . }} + +{{ template "prom_content_head" . }} +

    Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}

    + +

    CPU Usage

    +
    + + +

    Disk I/O Utilization

    +
    + + +

    Memory

    +
    + + +{{ template "prom_right_table_head" }} + Overview + + User CPU + {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} + + + System CPU + {{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }} + + + Memory Total + {{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} + + + Memory Free + {{ template "prom_query_drilldown" (args (printf "node_memory_MemFree{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }} + + + Network + +{{ range printf "node_network_receive_bytes{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }} + + {{ .Labels.device }} Received + {{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} + + + {{ .Labels.device }} Transmitted + {{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }} + +{{ end }} + + + Disks + +{{ range printf "node_disk_io_time_ms{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }} + + {{ .Labels.device }} Utilization + {{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 * 100" .Labels.instance .Labels.device) "%" "printf.1f") }} + +{{ end }} +{{ range printf "node_disk_io_time_ms{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" 
}} + + {{ .Labels.device }} Throughput + {{ template "prom_query_drilldown" (args (printf "irate(node_disk_sectors_read{job='node',instance='%s',device='%s'}[5m]) * 512 + irate(node_disk_sectors_written{job='node',instance='%s',device='%s'}[5m]) * 512" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }} + +{{ end }} + + Filesystem Fullness + +{{ define "roughlyNearZero" }} +{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }} +{{ end }} +{{ range printf "node_filesystem_size{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }} + + {{ .Labels.mountpoint }} + {{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_free{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }} + +{{ end }} + +{{ template "prom_right_table_tail" }} + +{{ template "prom_content_tail" . }} + +{{ template "tail" }} diff --git a/src/prometheus/consoles/node.html b/src/prometheus/consoles/node.html new file mode 100644 index 0000000..69fe4a9 --- /dev/null +++ b/src/prometheus/consoles/node.html @@ -0,0 +1,34 @@ +{{ template "head" . }} + +{{ template "prom_right_table_head" }} + + Node + {{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }} + +{{ template "prom_right_table_tail" }} + +{{ template "prom_content_head" . }} +

    Node

    + + + + + + + + +{{ range query "up{job='node'}" | sortByLabel "instance" }} + + + Yes{{ else }} class="alert-danger">No{{ end }} + + + +{{ else }} + +{{ end }} + + +{{ template "prom_content_tail" . }} + +{{ template "tail" }} diff --git a/src/prometheus/consoles/prometheus-overview.html b/src/prometheus/consoles/prometheus-overview.html new file mode 100644 index 0000000..e871666 --- /dev/null +++ b/src/prometheus/consoles/prometheus-overview.html @@ -0,0 +1,95 @@ +{{ template "head" . }} + +{{ template "prom_right_table_head" }} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +{{ range printf "http_request_duration_microseconds_count{job='prometheus',instance='%s',handler=~'^(query.*|federate|consoles)$'}" .Params.instance | query | sortByLabel "handler" }} + + + + +{{ end }} + +{{ template "prom_right_table_tail" }} + +{{ template "prom_content_head" . }} +

    Prometheus Overview - {{ .Params.instance }}

    + +

    Ingested Samples

    +
    + + +

    HTTP Server

    +
    + + +{{ template "prom_content_tail" . }} + +{{ template "tail" }} diff --git a/src/prometheus/consoles/prometheus.html b/src/prometheus/consoles/prometheus.html new file mode 100644 index 0000000..b85aa35 --- /dev/null +++ b/src/prometheus/consoles/prometheus.html @@ -0,0 +1,33 @@ +{{ template "head" . }} + +{{ template "prom_right_table_head" }} + + + + +{{ template "prom_right_table_tail" }} + +{{ template "prom_content_head" . }} +

    Prometheus

    + +
    NodeUpCPU
    Used
    Memory
    Available
    {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance)(irate(node_cpu{job='node',mode='idle',instance='%s'}[5m])))" .Labels.instance) "%" "printf.1f") }}{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree{job='node',instance='%s'} + node_memory_Cached{job='node',instance='%s'} + node_memory_Buffers{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}
    No nodes found.
    Overview
    CPU{{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }}
    Memory{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }}
    Version{{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{. | first | label "version"}}{{end}}
    Storage
    Ingested Samples{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }}
    Head Series{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}
    Blocks Loaded{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}
    Rules
    Evaluation Duration{{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}
    Notification Latency{{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}
    Notification Queue{{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}
    HTTP Server
    {{ .Labels.handler }}{{ template "prom_query_drilldown" (args (printf "irate(http_request_duration_microseconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }}
    Prometheus{{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }}
    + + + + + + +{{ range query "up{job='prometheus'}" | sortByLabel "instance" }} + + + + + + +{{ else }} + +{{ end }} + +{{ template "prom_content_tail" . }} + +{{ template "tail" }} diff --git a/src/prometheus/discovery/README.md b/src/prometheus/discovery/README.md new file mode 100644 index 0000000..3668913 --- /dev/null +++ b/src/prometheus/discovery/README.md @@ -0,0 +1,219 @@ +### Service Discovery + +This directory contains the service discovery (SD) component of Prometheus. + +There is currently a moratorium on new service discovery mechanisms being added +to Prometheus due to a lack of developer capacity. In the meantime `file_sd` +remains available. + +## Design of a Prometheus SD + +There are many requests to add new SDs to Prometheus, this section looks at +what makes a good SD and covers some of the common implementation issues. + +### Does this make sense as an SD? + +The first question to be asked is does it make sense to add this particular +SD? An SD mechanism should be reasonably well established, and at a minimum in +use across multiple organisations. It should allow discovering of machines +and/or services running somewhere. When exactly an SD is popular enough to +justify being added to Prometheus natively is an open question. + +It should not be a brand new SD mechanism, or a variant of an established +mechanism. We want to integrate Prometheus with the SD that's already there in +your infrastructure, not invent yet more ways to do service discovery. We also +do not add mechanisms to work around users lacking service discovery and/or +configuration management infrastructure. + +SDs that merely discover other applications running the same software (e.g. +talk to one Kafka or Cassandra server to find the others) are not service +discovery. In that case the SD you should be looking at is whatever decides +that a machine is going to be a Kafka server, likely a machine database or +configuration management system. 
+ +If something is particularly custom or unusual, `file_sd` is the generic +mechanism provided for users to hook in. Generally with Prometheus we offer a +single generic mechanism for things with infinite variations, rather than +trying to support everything natively (see also, alertmanager webhook, remote +read, remote write, node exporter textfile collector). For example anything +that would involve talking to a relational database should use `file_sd` +instead. + +For configuration management systems like Chef, while they do have a +database/API that'd in principle make sense to talk to for service discovery, +the idiomatic approach is to use Chef's templating facilities to write out a +file for use with `file_sd`. + + +### Mapping from SD to Prometheus + +The general principle with SD is to extract all the potentially useful +information we can out of the SD, and let the user choose what they need of it +using +[relabelling](https://prometheus.io/docs/operating/configuration/#relabel_config). +This information is generally termed metadata. + +Metadata is exposed as a set of key/value pairs (labels) per target. The keys +are prefixed with `__meta_`, and there should also be an `__address__` +label with the host:port of the target (preferably an IP address to avoid DNS +lookups). No other labelnames should be exposed. + +It is very common for initial pull requests for new SDs to include hardcoded +assumptions that make sense for the author's setup. SD should be generic, +any customisation should be handled via relabelling. There should be basically +no business logic, filtering, or transformations of the data from the SD beyond +that which is needed to fit it into the metadata data model. + +Arrays (e.g. a list of tags) should be converted to a single label with the +array values joined with a comma. Also prefix and suffix the value with a +comma. So for example the array `[a, b, c]` would become `,a,b,c,`. 
As +relabelling regexes are fully anchored, this makes it easier to write correct +regexes against (`.*,a,.*` works no matter where `a` appears in the list). The +canonical example of this is `__meta_consul_tags`. + +Maps, hashes and other forms of key/value pairs should be all prefixed and +exposed as labels. For example for EC2 tags, there would be +`__meta_ec2_tag_Description=mydescription` for the Description tag. Labelnames +may only contain `[_a-zA-Z0-9]`, sanitize by replacing with underscores as needed. + +For targets with multiple potential ports, you can a) expose them as a list, b) +if they're named expose them as a map or c) expose them each as their own +target. Kubernetes SD takes the target per port approach. a) and b) can be +combined. + +For machine-like SDs (OpenStack, EC2, Kubernetes to some extent) there may +be multiple network interfaces for a target. Thus far reporting the details +of only the first/primary network interface has sufficed. + + +### Other implementation considerations + +SDs are intended to dump all possible targets. For example the optional use of +EC2 service discovery would be to take the entire region's worth of EC2 +instances it provides and do everything needed in one `scrape_config`. For +large deployments where you are only interested in a small proportion of the +returned targets, this may cause performance issues. If this occurs it is +acceptable to also offer filtering via whatever mechanisms the SD exposes. For +EC2 that would be the `Filter` option on `DescribeInstances`. Keep in mind that +this is a performance optimisation, it should be possible to do the same +filtering using relabelling alone. As with SD generally, we do not invent new +ways to filter targets (that is what relabelling is for), merely offer up +whatever functionality the SD itself offers. + +It is a general rule with Prometheus that all configuration comes from the +configuration file. 
While the libraries you use to talk to the SD may also +offer other mechanisms for providing configuration/authentication under the +covers (EC2's use of environment variables being a prime example), using your SD +mechanism should not require this. Put another way, your SD implementation +should not read environment variables or files to obtain configuration. + +Some SD mechanisms have rate limits that make them challenging to use. As an +example we have unfortunately had to reject Amazon ECS service discovery due to +the rate limits being so low that it would not be usable for anything beyond +small setups. + +If a system offers multiple distinct types of SD, select which is in use with a +configuration option rather than returning them all from one mega SD that +requires relabelling to select just the one you want. So far we have only seen +this with Kubernetes. When a single SD with a selector vs. multiple distinct +SDs makes sense is an open question. + +If there is a failure while talking to the SD, abort rather than +returning partial data. It is better to work from stale targets than partial +or incorrect metadata. + +The information obtained from service discovery is not considered sensitive +security wise. Do not return secrets in metadata, anyone with access to +the Prometheus server will be able to see them. + + +## Writing an SD mechanism + +### The SD interface + +A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [`TargetGroup`](https://godoc.org/github.com/prometheus/prometheus/config#TargetGroup). The SD mechanism sends the targets down to Prometheus as a list of `TargetGroups`. 
+ +An SD mechanism has to implement the `Discoverer` Interface: +```go +type Discoverer interface { + Run(ctx context.Context, up chan<- []*config.TargetGroup) +} +``` + +Prometheus will call the `Run()` method on a provider to initialise the discovery mechanism. The mechanism will then send *all* the `TargetGroup`s into the channel. +Now the mechanism will watch for changes. For each update it can send all `TargetGroup`s, or only changed and new `TargetGroup`s, down the channel. `Manager` will handle +both cases. + +For example if we had a discovery mechanism and it retrieves the following groups: + +``` +[]config.TargetGroup{ + { + Targets: []model.LabelSet{ + { + "__instance__": "10.11.150.1:7870", + "hostname": "demo-target-1", + "test": "simple-test", + }, + { + "__instance__": "10.11.150.4:7870", + "hostname": "demo-target-2", + "test": "simple-test", + }, + }, + Labels: map[LabelName][LabelValue] { + "job": "mysql", + }, + "Source": "file1", + }, + { + Targets: []model.LabelSet{ + { + "__instance__": "10.11.122.11:6001", + "hostname": "demo-postgres-1", + "test": "simple-test", + }, + { + "__instance__": "10.11.122.15:6001", + "hostname": "demo-postgres-2", + "test": "simple-test", + }, + }, + Labels: map[LabelName][LabelValue] { + "job": "postgres", + }, + "Source": "file2", + }, +} +``` + +Here there are two `TargetGroups` one group with source `file1` and another with `file2`. The grouping is implementation specific and could even be one target per group. But, one has to make sure every target group sent by an SD instance should have a `Source` which is unique across all the `TargetGroup`s of that SD instance. + +In this case, both the `TargetGroup`s are sent down the channel the first time `Run()` is called. Now, for an update, we need to send the whole _changed_ `TargetGroup` down the channel. 
i.e, if the target with `hostname: demo-postgres-2` goes away, we send: +``` +&config.TargetGroup{ + Targets: []model.LabelSet{ + { + "__instance__": "10.11.122.11:6001", + "hostname": "demo-postgres-1", + "test": "simple-test", + }, + }, + Labels: map[LabelName][LabelValue] { + "job": "postgres", + }, + "Source": "file2", +} +``` +down the channel. + +If all the targets in a group go away, we need to send the target groups with empty `Targets` down the channel. i.e, if all targets with `job: postgres` go away, we send: +``` +&config.TargetGroup{ + Targets: nil, + "Source": "file2", +} +``` +down the channel. + + diff --git a/src/prometheus/discovery/azure/azure.go b/src/prometheus/discovery/azure/azure.go new file mode 100644 index 0000000..aa7b865 --- /dev/null +++ b/src/prometheus/discovery/azure/azure.go @@ -0,0 +1,317 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package azure + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/go-autorest/autorest/azure" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + azureLabel = model.MetaLabelPrefix + "azure_" + azureLabelMachineID = azureLabel + "machine_id" + azureLabelMachineResourceGroup = azureLabel + "machine_resource_group" + azureLabelMachineName = azureLabel + "machine_name" + azureLabelMachineOSType = azureLabel + "machine_os_type" + azureLabelMachineLocation = azureLabel + "machine_location" + azureLabelMachinePrivateIP = azureLabel + "machine_private_ip" + azureLabelMachineTag = azureLabel + "machine_tag_" +) + +var ( + azureSDRefreshFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_azure_refresh_failures_total", + Help: "Number of Azure-SD refresh failures.", + }) + azureSDRefreshDuration = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_azure_refresh_duration_seconds", + Help: "The duration of a Azure-SD refresh in seconds.", + }) + + // DefaultSDConfig is the default Azure SD configuration. + DefaultSDConfig = SDConfig{ + Port: 80, + RefreshInterval: model.Duration(5 * time.Minute), + } +) + +// SDConfig is the configuration for Azure based service discovery. 
+type SDConfig struct { + Port int `yaml:"port"` + SubscriptionID string `yaml:"subscription_id"` + TenantID string `yaml:"tenant_id,omitempty"` + ClientID string `yaml:"client_id,omitempty"` + ClientSecret config_util.Secret `yaml:"client_secret,omitempty"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if c.SubscriptionID == "" { + return fmt.Errorf("Azure SD configuration requires a subscription_id") + } + return nil +} + +func init() { + prometheus.MustRegister(azureSDRefreshDuration) + prometheus.MustRegister(azureSDRefreshFailuresCount) +} + +// Discovery periodically performs Azure-SD requests. It implements +// the Discoverer interface. +type Discovery struct { + cfg *SDConfig + interval time.Duration + port int + logger log.Logger +} + +// NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. +func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery { + if logger == nil { + logger = log.NewNopLogger() + } + return &Discovery{ + cfg: cfg, + interval: time.Duration(cfg.RefreshInterval), + port: cfg.Port, + logger: logger, + } +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + default: + } + + tg, err := d.refresh() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to refresh during Azure discovery", "err", err) + } else { + select { + case <-ctx.Done(): + case ch <- []*targetgroup.Group{tg}: + } + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return + } + } +} + +// azureClient represents multiple Azure Resource Manager providers. 
+type azureClient struct { + nic network.InterfacesClient + vm compute.VirtualMachinesClient +} + +// createAzureClient is a helper function for creating an Azure compute client to ARM. +func createAzureClient(cfg SDConfig) (azureClient, error) { + var c azureClient + oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(cfg.TenantID) + if err != nil { + return azureClient{}, err + } + spt, err := azure.NewServicePrincipalToken(*oauthConfig, cfg.ClientID, string(cfg.ClientSecret), azure.PublicCloud.ResourceManagerEndpoint) + if err != nil { + return azureClient{}, err + } + + c.vm = compute.NewVirtualMachinesClient(cfg.SubscriptionID) + c.vm.Authorizer = spt + + c.nic = network.NewInterfacesClient(cfg.SubscriptionID) + c.nic.Authorizer = spt + + return c, nil +} + +// azureResource represents a resource identifier in Azure. +type azureResource struct { + Name string + ResourceGroup string +} + +// Create a new azureResource object from an ID string. +func newAzureResourceFromID(id string, logger log.Logger) (azureResource, error) { + // Resource IDs have the following format. + // /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME + s := strings.Split(id, "/") + if len(s) != 9 { + err := fmt.Errorf("invalid ID '%s'. 
Refusing to create azureResource", id) + level.Error(logger).Log("err", err) + return azureResource{}, err + } + return azureResource{ + Name: strings.ToLower(s[8]), + ResourceGroup: strings.ToLower(s[4]), + }, nil +} + +func (d *Discovery) refresh() (tg *targetgroup.Group, err error) { + defer level.Debug(d.logger).Log("msg", "Azure discovery completed") + + t0 := time.Now() + defer func() { + azureSDRefreshDuration.Observe(time.Since(t0).Seconds()) + if err != nil { + azureSDRefreshFailuresCount.Inc() + } + }() + tg = &targetgroup.Group{} + client, err := createAzureClient(*d.cfg) + if err != nil { + return tg, fmt.Errorf("could not create Azure client: %s", err) + } + + var machines []compute.VirtualMachine + result, err := client.vm.ListAll() + if err != nil { + return tg, fmt.Errorf("could not list virtual machines: %s", err) + } + machines = append(machines, *result.Value...) + + // If we still have results, keep going until we have no more. + for result.NextLink != nil { + result, err = client.vm.ListAllNextResults(result) + if err != nil { + return tg, fmt.Errorf("could not list virtual machines: %s", err) + } + machines = append(machines, *result.Value...) + } + level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines)) + + // We have the slice of machines. Now turn them into targets. + // Doing them in go routines because the network interface calls are slow. 
+ type target struct { + labelSet model.LabelSet + err error + } + + ch := make(chan target, len(machines)) + for i, vm := range machines { + go func(i int, vm compute.VirtualMachine) { + r, err := newAzureResourceFromID(*vm.ID, d.logger) + if err != nil { + ch <- target{labelSet: nil, err: err} + return + } + + labels := model.LabelSet{ + azureLabelMachineID: model.LabelValue(*vm.ID), + azureLabelMachineName: model.LabelValue(*vm.Name), + azureLabelMachineOSType: model.LabelValue(vm.Properties.StorageProfile.OsDisk.OsType), + azureLabelMachineLocation: model.LabelValue(*vm.Location), + azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup), + } + + if vm.Tags != nil { + for k, v := range *vm.Tags { + name := strutil.SanitizeLabelName(k) + labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v) + } + } + + // Get the IP address information via separate call to the network provider. + for _, nic := range *vm.Properties.NetworkProfile.NetworkInterfaces { + r, err := newAzureResourceFromID(*nic.ID, d.logger) + if err != nil { + ch <- target{labelSet: nil, err: err} + return + } + networkInterface, err := client.nic.Get(r.ResourceGroup, r.Name, "") + if err != nil { + level.Error(d.logger).Log("msg", "Unable to get network interface", "name", r.Name, "err", err) + ch <- target{labelSet: nil, err: err} + // Get out of this routine because we cannot continue without a network interface. + return + } + + // Unfortunately Azure does not return information on whether a VM is deallocated. + // This information is available via another API call however the Go SDK does not + // yet support this. On deallocated machines, this value happens to be nil so it + // is a cheap and easy way to determine if a machine is allocated or not. 
+ if networkInterface.Properties.Primary == nil { + level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", *vm.Name) + ch <- target{} + return + } + + if *networkInterface.Properties.Primary { + for _, ip := range *networkInterface.Properties.IPConfigurations { + if ip.Properties.PrivateIPAddress != nil { + labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress) + address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port)) + labels[model.AddressLabel] = model.LabelValue(address) + ch <- target{labelSet: labels, err: nil} + return + } + // If we made it here, we don't have a private IP which should be impossible. + // Return an empty target and error to ensure an all or nothing situation. + err = fmt.Errorf("unable to find a private IP for VM %s", *vm.Name) + ch <- target{labelSet: nil, err: err} + return + } + } + } + }(i, vm) + } + + for range machines { + tgt := <-ch + if tgt.err != nil { + return nil, fmt.Errorf("unable to complete Azure service discovery: %s", err) + } + if tgt.labelSet != nil { + tg.Targets = append(tg.Targets, tgt.labelSet) + } + } + + return tg, nil +} diff --git a/src/prometheus/discovery/config/config.go b/src/prometheus/discovery/config/config.go new file mode 100644 index 0000000..68a9bc1 --- /dev/null +++ b/src/prometheus/discovery/config/config.go @@ -0,0 +1,65 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "github.com/prometheus/prometheus/discovery/azure" + "github.com/prometheus/prometheus/discovery/consul" + "github.com/prometheus/prometheus/discovery/dns" + "github.com/prometheus/prometheus/discovery/ec2" + "github.com/prometheus/prometheus/discovery/file" + "github.com/prometheus/prometheus/discovery/gce" + "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/prometheus/prometheus/discovery/marathon" + "github.com/prometheus/prometheus/discovery/openstack" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/discovery/triton" + "github.com/prometheus/prometheus/discovery/zookeeper" +) + +// ServiceDiscoveryConfig configures lists of different service discovery mechanisms. +type ServiceDiscoveryConfig struct { + // List of labeled target groups for this job. + StaticConfigs []*targetgroup.Group `yaml:"static_configs,omitempty"` + // List of DNS service discovery configurations. + DNSSDConfigs []*dns.SDConfig `yaml:"dns_sd_configs,omitempty"` + // List of file service discovery configurations. + FileSDConfigs []*file.SDConfig `yaml:"file_sd_configs,omitempty"` + // List of Consul service discovery configurations. + ConsulSDConfigs []*consul.SDConfig `yaml:"consul_sd_configs,omitempty"` + // List of Serverset service discovery configurations. + ServersetSDConfigs []*zookeeper.ServersetSDConfig `yaml:"serverset_sd_configs,omitempty"` + // NerveSDConfigs is a list of Nerve service discovery configurations. + NerveSDConfigs []*zookeeper.NerveSDConfig `yaml:"nerve_sd_configs,omitempty"` + // MarathonSDConfigs is a list of Marathon service discovery configurations. + MarathonSDConfigs []*marathon.SDConfig `yaml:"marathon_sd_configs,omitempty"` + // List of Kubernetes service discovery configurations. 
+ KubernetesSDConfigs []*kubernetes.SDConfig `yaml:"kubernetes_sd_configs,omitempty"` + // List of GCE service discovery configurations. + GCESDConfigs []*gce.SDConfig `yaml:"gce_sd_configs,omitempty"` + // List of EC2 service discovery configurations. + EC2SDConfigs []*ec2.SDConfig `yaml:"ec2_sd_configs,omitempty"` + // List of OpenStack service discovery configurations. + OpenstackSDConfigs []*openstack.SDConfig `yaml:"openstack_sd_configs,omitempty"` + // List of Azure service discovery configurations. + AzureSDConfigs []*azure.SDConfig `yaml:"azure_sd_configs,omitempty"` + // List of Triton service discovery configurations. + TritonSDConfigs []*triton.SDConfig `yaml:"triton_sd_configs,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *ServiceDiscoveryConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain ServiceDiscoveryConfig + return unmarshal((*plain)(c)) +} diff --git a/src/prometheus/discovery/consul/consul.go b/src/prometheus/discovery/consul/consul.go new file mode 100644 index 0000000..4a7c6cf --- /dev/null +++ b/src/prometheus/discovery/consul/consul.go @@ -0,0 +1,517 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package consul + +import ( + "context" + "fmt" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + consul "github.com/hashicorp/consul/api" + "github.com/mwitkow/go-conntrack" + "github.com/prometheus/client_golang/prometheus" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + watchTimeout = 30 * time.Second + retryInterval = 15 * time.Second + + // addressLabel is the name for the label containing a target's address. + addressLabel = model.MetaLabelPrefix + "consul_address" + // nodeLabel is the name for the label containing a target's node name. + nodeLabel = model.MetaLabelPrefix + "consul_node" + // metaDataLabel is the prefix for the labels mapping to a target's metadata. + metaDataLabel = model.MetaLabelPrefix + "consul_metadata_" + // tagsLabel is the name of the label containing the tags assigned to the target. + tagsLabel = model.MetaLabelPrefix + "consul_tags" + // serviceLabel is the name of the label containing the service name. + serviceLabel = model.MetaLabelPrefix + "consul_service" + // serviceAddressLabel is the name of the label containing the (optional) service address. + serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" + //servicePortLabel is the name of the label containing the service port. + servicePortLabel = model.MetaLabelPrefix + "consul_service_port" + // datacenterLabel is the name of the label containing the datacenter ID. + datacenterLabel = model.MetaLabelPrefix + "consul_dc" + // serviceIDLabel is the name of the label containing the service ID. + serviceIDLabel = model.MetaLabelPrefix + "consul_service_id" + + // Constants for instrumentation. 
+ namespace = "prometheus" +) + +var ( + rpcFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_consul_rpc_failures_total", + Help: "The number of Consul RPC call failures.", + }) + rpcDuration = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Namespace: namespace, + Name: "sd_consul_rpc_duration_seconds", + Help: "The duration of a Consul RPC call in seconds.", + }, + []string{"endpoint", "call"}, + ) + + // DefaultSDConfig is the default Consul SD configuration. + DefaultSDConfig = SDConfig{ + TagSeparator: ",", + Scheme: "http", + Server: "localhost:8500", + AllowStale: true, + RefreshInterval: model.Duration(watchTimeout), + } +) + +// SDConfig is the configuration for Consul service discovery. +type SDConfig struct { + Server string `yaml:"server,omitempty"` + Token config_util.Secret `yaml:"token,omitempty"` + Datacenter string `yaml:"datacenter,omitempty"` + TagSeparator string `yaml:"tag_separator,omitempty"` + Scheme string `yaml:"scheme,omitempty"` + Username string `yaml:"username,omitempty"` + Password config_util.Secret `yaml:"password,omitempty"` + + // See https://www.consul.io/docs/internals/consensus.html#consistency-modes, + // stale reads are a lot cheaper and are a necessity if you have >5k targets. + AllowStale bool `yaml:"allow_stale"` + // By default use blocking queries (https://www.consul.io/api/index.html#blocking-queries) + // but allow users to throttle updates if necessary. This can be useful because of "bugs" like + // https://github.com/hashicorp/consul/issues/3712 which cause an un-necessary + // amount of requests on consul. + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + + // See https://www.consul.io/api/catalog.html#list-services + // The list of services for which targets are discovered. + // Defaults to all services if empty. + Services []string `yaml:"services"` + // An optional tag used to filter instances inside a service. 
A single tag is supported + // here to match the Consul API. + ServiceTag string `yaml:"tag,omitempty"` + // Desired node metadata. + NodeMeta map[string]string `yaml:"node_meta,omitempty"` + + TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if strings.TrimSpace(c.Server) == "" { + return fmt.Errorf("Consul SD configuration requires a server address") + } + return nil +} + +func init() { + prometheus.MustRegister(rpcFailuresCount) + prometheus.MustRegister(rpcDuration) + + // Initialize metric vectors. + rpcDuration.WithLabelValues("catalog", "service") + rpcDuration.WithLabelValues("catalog", "services") +} + +// Discovery retrieves target information from a Consul server +// and updates them via watches. +type Discovery struct { + client *consul.Client + clientConf *consul.Config + clientDatacenter string + tagSeparator string + watchedServices []string // Set of services which will be discovered. + watchedTag string // A tag used to filter instances of a service. + watchedNodeMeta map[string]string + allowStale bool + refreshInterval time.Duration + logger log.Logger +} + +// NewDiscovery returns a new Discovery for the given config. 
+func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { + if logger == nil { + logger = log.NewNopLogger() + } + + tls, err := config_util.NewTLSConfig(&conf.TLSConfig) + if err != nil { + return nil, err + } + transport := &http.Transport{ + TLSClientConfig: tls, + DialContext: conntrack.NewDialContextFunc( + conntrack.DialWithTracing(), + conntrack.DialWithName("consul_sd"), + ), + } + wrapper := &http.Client{ + Transport: transport, + Timeout: 35 * time.Second, + } + + clientConf := &consul.Config{ + Address: conf.Server, + Scheme: conf.Scheme, + Datacenter: conf.Datacenter, + Token: string(conf.Token), + HttpAuth: &consul.HttpBasicAuth{ + Username: conf.Username, + Password: string(conf.Password), + }, + HttpClient: wrapper, + } + client, err := consul.NewClient(clientConf) + if err != nil { + return nil, err + } + cd := &Discovery{ + client: client, + clientConf: clientConf, + tagSeparator: conf.TagSeparator, + watchedServices: conf.Services, + watchedTag: conf.ServiceTag, + watchedNodeMeta: conf.NodeMeta, + allowStale: conf.AllowStale, + refreshInterval: time.Duration(conf.RefreshInterval), + clientDatacenter: clientConf.Datacenter, + logger: logger, + } + return cd, nil +} + +// shouldWatch returns whether the service of the given name should be watched. +func (d *Discovery) shouldWatch(name string, tags []string) bool { + return d.shouldWatchFromName(name) && d.shouldWatchFromTags(tags) +} + +// shouldWatch returns whether the service of the given name should be watched based on its name. +func (d *Discovery) shouldWatchFromName(name string) bool { + // If there's no fixed set of watched services, we watch everything. + if len(d.watchedServices) == 0 { + return true + } + + for _, sn := range d.watchedServices { + if sn == name { + return true + } + } + return false +} + +// shouldWatch returns whether the service of the given name should be watched based on its tags. 
+// This gets called when the user doesn't specify a list of services in order to avoid watching +// *all* services. Details in https://github.com/prometheus/prometheus/pull/3814 +func (d *Discovery) shouldWatchFromTags(tags []string) bool { + // If there's no fixed set of watched tags, we watch everything. + if d.watchedTag == "" { + return true + } + + for _, tag := range tags { + if d.watchedTag == tag { + return true + } + } + return false +} + +// Get the local datacenter if not specified. +func (d *Discovery) getDatacenter() error { + // If the datacenter was not set from clientConf, let's get it from the local Consul agent + // (Consul default is to use local node's datacenter if one isn't given for a query). + if d.clientDatacenter != "" { + return nil + } + + info, err := d.client.Agent().Self() + if err != nil { + level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + rpcFailuresCount.Inc() + return err + } + + dc, ok := info["Config"]["Datacenter"].(string) + if !ok { + err := fmt.Errorf("Invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"]) + level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) + return err + } + + d.clientDatacenter = dc + return nil +} + +// Initialize the Discoverer run. +func (d *Discovery) initialize(ctx context.Context) { + // Loop until we manage to get the local datacenter. + for { + // We have to check the context at least once. The checks during channel sends + // do not guarantee that. + select { + case <-ctx.Done(): + return + default: + } + + // Get the local datacenter first, if necessary. + err := d.getDatacenter() + if err != nil { + time.Sleep(retryInterval) + continue + } + // We are good to go. + return + } +} + +// Run implements the Discoverer interface. 
+func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + d.initialize(ctx) + + if len(d.watchedServices) == 0 || d.watchedTag != "" { + // We need to watch the catalog. + ticker := time.NewTicker(d.refreshInterval) + + // Watched services and their cancellation functions. + services := make(map[string]func()) + var lastIndex uint64 + + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + default: + d.watchServices(ctx, ch, &lastIndex, services) + <-ticker.C + } + } + + } else { + // We only have fully defined services. + for _, name := range d.watchedServices { + d.watchService(ctx, ch, name) + } + <-ctx.Done() + } +} + +// Watch the catalog for new services we would like to watch. This is called only +// when we don't know yet the names of the services and need to ask Consul the +// entire list of services. +func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) error { + catalog := d.client.Catalog() + level.Debug(d.logger).Log("msg", "Watching services", "tag", d.watchedTag) + + t0 := time.Now() + srvs, meta, err := catalog.Services(&consul.QueryOptions{ + WaitIndex: *lastIndex, + WaitTime: watchTimeout, + AllowStale: d.allowStale, + NodeMeta: d.watchedNodeMeta, + }) + elapsed := time.Since(t0) + rpcDuration.WithLabelValues("catalog", "services").Observe(elapsed.Seconds()) + + if err != nil { + level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err) + rpcFailuresCount.Inc() + time.Sleep(retryInterval) + return err + } + // If the index equals the previous one, the watch timed out with no update. + if meta.LastIndex == *lastIndex { + return nil + } + *lastIndex = meta.LastIndex + + // Check for new services. + for name := range srvs { + // catalog.Service() returns a map of service name to tags, we can use that to watch + // only the services that have the tag we are looking for (if specified). 
+ // In the future consul will also support server side for service metadata. + // https://github.com/hashicorp/consul/issues/1107 + if !d.shouldWatch(name, srvs[name]) { + continue + } + if _, ok := services[name]; ok { + continue // We are already watching the service. + } + + wctx, cancel := context.WithCancel(ctx) + d.watchService(wctx, ch, name) + services[name] = cancel + } + + // Check for removed services. + for name, cancel := range services { + if _, ok := srvs[name]; !ok { + // Call the watch cancellation function. + cancel() + delete(services, name) + + // Send clearing target group. + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- []*targetgroup.Group{{Source: name}}: + } + } + } + return nil +} + +// consulService contains data belonging to the same service. +type consulService struct { + name string + tag string + labels model.LabelSet + discovery *Discovery + client *consul.Client + tagSeparator string + logger log.Logger +} + +// Start watching a service. +func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.Group, name string) { + srv := &consulService{ + discovery: d, + client: d.client, + name: name, + tag: d.watchedTag, + labels: model.LabelSet{ + serviceLabel: model.LabelValue(name), + datacenterLabel: model.LabelValue(d.clientDatacenter), + }, + tagSeparator: d.tagSeparator, + logger: d.logger, + } + + go func() { + ticker := time.NewTicker(d.refreshInterval) + var lastIndex uint64 + catalog := srv.client.Catalog() + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + default: + srv.watch(ctx, ch, catalog, &lastIndex) + <-ticker.C + } + } + }() +} + +// Get updates for a service. 
+func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, catalog *consul.Catalog, lastIndex *uint64) error { + level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tag", srv.tag) + + t0 := time.Now() + nodes, meta, err := catalog.Service(srv.name, srv.tag, &consul.QueryOptions{ + WaitIndex: *lastIndex, + WaitTime: watchTimeout, + AllowStale: srv.discovery.allowStale, + NodeMeta: srv.discovery.watchedNodeMeta, + }) + elapsed := time.Since(t0) + rpcDuration.WithLabelValues("catalog", "service").Observe(elapsed.Seconds()) + + // Check the context before in order to exit early. + select { + case <-ctx.Done(): + return ctx.Err() + default: + // Continue. + } + + if err != nil { + level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tag", srv.tag, "err", err) + rpcFailuresCount.Inc() + time.Sleep(retryInterval) + return err + } + // If the index equals the previous one, the watch timed out with no update. + if meta.LastIndex == *lastIndex { + return nil + } + *lastIndex = meta.LastIndex + + tgroup := targetgroup.Group{ + Source: srv.name, + Labels: srv.labels, + Targets: make([]model.LabelSet, 0, len(nodes)), + } + + for _, node := range nodes { + + // We surround the separated list with the separator as well. This way regular expressions + // in relabeling rules don't have to consider tag positions. 
+ var tags = srv.tagSeparator + strings.Join(node.ServiceTags, srv.tagSeparator) + srv.tagSeparator + + // If the service address is not empty it should be used instead of the node address + // since the service may be registered remotely through a different node + var addr string + if node.ServiceAddress != "" { + addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort)) + } else { + addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort)) + } + + labels := model.LabelSet{ + model.AddressLabel: model.LabelValue(addr), + addressLabel: model.LabelValue(node.Address), + nodeLabel: model.LabelValue(node.Node), + tagsLabel: model.LabelValue(tags), + serviceAddressLabel: model.LabelValue(node.ServiceAddress), + servicePortLabel: model.LabelValue(strconv.Itoa(node.ServicePort)), + serviceIDLabel: model.LabelValue(node.ServiceID), + } + + // Add all key/value pairs from the node's metadata as their own labels + for k, v := range node.NodeMeta { + name := strutil.SanitizeLabelName(k) + labels[metaDataLabel+model.LabelName(name)] = model.LabelValue(v) + } + + tgroup.Targets = append(tgroup.Targets, labels) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- []*targetgroup.Group{&tgroup}: + } + return nil +} diff --git a/src/prometheus/discovery/consul/consul_test.go b/src/prometheus/discovery/consul/consul_test.go new file mode 100644 index 0000000..1742bb2 --- /dev/null +++ b/src/prometheus/discovery/consul/consul_test.go @@ -0,0 +1,207 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consul + +import ( + "context" + "testing" + "time" + + "net/http" + "net/http/httptest" + "net/url" + + "github.com/go-kit/kit/log" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/testutil" +) + +func TestConfiguredService(t *testing.T) { + conf := &SDConfig{ + Services: []string{"configuredServiceName"}} + consulDiscovery, err := NewDiscovery(conf, nil) + + if err != nil { + t.Errorf("Unexpected error when initialising discovery %v", err) + } + if !consulDiscovery.shouldWatch("configuredServiceName", []string{""}) { + t.Errorf("Expected service %s to be watched", "configuredServiceName") + } + if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) { + t.Errorf("Expected service %s to not be watched", "nonConfiguredServiceName") + } +} + +func TestConfiguredServiceWithTag(t *testing.T) { + conf := &SDConfig{ + Services: []string{"configuredServiceName"}, + ServiceTag: "http", + } + consulDiscovery, err := NewDiscovery(conf, nil) + + if err != nil { + t.Errorf("Unexpected error when initialising discovery %v", err) + } + if consulDiscovery.shouldWatch("configuredServiceName", []string{""}) { + t.Errorf("Expected service %s to not be watched without tag", "configuredServiceName") + } + if !consulDiscovery.shouldWatch("configuredServiceName", []string{"http"}) { + t.Errorf("Expected service %s to be watched with tag %s", "configuredServiceName", "http") + } + if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) { 
+ t.Errorf("Expected service %s to not be watched without tag", "nonConfiguredServiceName") + } + if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{"http"}) { + t.Errorf("Expected service %s to not be watched with tag %s", "nonConfiguredServiceName", "http") + } +} + +func TestNonConfiguredService(t *testing.T) { + conf := &SDConfig{} + consulDiscovery, err := NewDiscovery(conf, nil) + + if err != nil { + t.Errorf("Unexpected error when initialising discovery %v", err) + } + if !consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) { + t.Errorf("Expected service %s to be watched", "nonConfiguredServiceName") + } +} + +const ( + AgentAnswer = `{"Config": {"Datacenter": "test-dc"}}` + ServiceTestAnswer = `[{ +"ID": "b78c2e48-5ef3-1814-31b8-0d880f50471e", +"Node": "node1", +"Address": "1.1.1.1", +"Datacenter": "test-dc", +"NodeMeta": {"rack_name": "2304"}, +"ServiceID": "test", +"ServiceName": "test", +"ServiceTags": ["tag1"], +"ServicePort": 3341, +"CreateIndex": 1, +"ModifyIndex": 1 +}]` + ServicesTestAnswer = `{"test": ["tag1"], "other": ["tag2"]}` +) + +func newServer(t *testing.T) (*httptest.Server, *SDConfig) { + // github.com/hashicorp/consul/testutil/ would be nice but it needs a local consul binary. 
+ stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ response := ""
+ switch r.URL.String() {
+ case "/v1/agent/self":
+ response = AgentAnswer
+ case "/v1/catalog/service/test?node-meta=rack_name%3A2304&stale=&tag=tag1&wait=30000ms":
+ response = ServiceTestAnswer
+ case "/v1/catalog/service/test?wait=30000ms":
+ response = ServiceTestAnswer
+ case "/v1/catalog/service/other?wait=30000ms":
+ response = `[]`
+ case "/v1/catalog/services?node-meta=rack_name%3A2304&stale=&wait=30000ms":
+ response = ServicesTestAnswer
+ case "/v1/catalog/services?wait=30000ms":
+ response = ServicesTestAnswer
+ case "/v1/catalog/services?index=1&node-meta=rack_name%3A2304&stale=&wait=30000ms":
+ time.Sleep(5 * time.Second)
+ response = ServicesTestAnswer
+ case "/v1/catalog/services?index=1&wait=30000ms":
+ time.Sleep(5 * time.Second)
+ response = ServicesTestAnswer
+ default:
+ t.Errorf("Unhandled consul call: %s", r.URL)
+ }
+ w.Header().Add("X-Consul-Index", "1")
+ w.Write([]byte(response))
+ }))
+ stuburl, err := url.Parse(stub.URL)
+ testutil.Ok(t, err)
+
+ config := &SDConfig{
+ Server: stuburl.Host,
+ Token: "fake-token",
+ RefreshInterval: model.Duration(1 * time.Second),
+ }
+ return stub, config
+}
+
+func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
+ logger := log.NewNopLogger()
+ d, err := NewDiscovery(config, logger)
+ testutil.Ok(t, err)
+ return d
+}
+
+func checkOneTarget(t *testing.T, tg []*targetgroup.Group) {
+ testutil.Equals(t, 1, len(tg))
+ target := tg[0]
+ testutil.Equals(t, "test-dc", string(target.Labels["__meta_consul_dc"]))
+ testutil.Equals(t, target.Source, string(target.Labels["__meta_consul_service"]))
+ if target.Source == "test" {
+ // test service should have one node.
+ testutil.Assert(t, len(target.Targets) > 0, "Test service should have one node")
+ }
+}
+
+// Watch all the services in the catalog. 
+func TestAllServices(t *testing.T) { + stub, config := newServer(t) + defer stub.Close() + + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go d.Run(ctx, ch) + checkOneTarget(t, <-ch) + checkOneTarget(t, <-ch) + cancel() +} + +// Watch only the test service. +func TestOneService(t *testing.T) { + stub, config := newServer(t) + defer stub.Close() + + config.Services = []string{"test"} + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go d.Run(ctx, ch) + checkOneTarget(t, <-ch) + cancel() +} + +// Watch the test service with a specific tag and node-meta. +func TestAllOptions(t *testing.T) { + stub, config := newServer(t) + defer stub.Close() + + config.Services = []string{"test"} + config.NodeMeta = map[string]string{"rack_name": "2304"} + config.ServiceTag = "tag1" + config.AllowStale = true + config.Token = "fake-token" + + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go d.Run(ctx, ch) + checkOneTarget(t, <-ch) + cancel() +} diff --git a/src/prometheus/discovery/dns/dns.go b/src/prometheus/discovery/dns/dns.go new file mode 100644 index 0000000..17e7dad --- /dev/null +++ b/src/prometheus/discovery/dns/dns.go @@ -0,0 +1,335 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dns + +import ( + "context" + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/miekg/dns" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + resolvConf = "/etc/resolv.conf" + + dnsNameLabel = model.MetaLabelPrefix + "dns_name" + + // Constants for instrumentation. + namespace = "prometheus" +) + +var ( + dnsSDLookupsCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookups_total", + Help: "The number of DNS-SD lookups.", + }) + dnsSDLookupFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookup_failures_total", + Help: "The number of DNS-SD lookup failures.", + }) + + // DefaultSDConfig is the default DNS SD configuration. + DefaultSDConfig = SDConfig{ + RefreshInterval: model.Duration(30 * time.Second), + Type: "SRV", + } +) + +// SDConfig is the configuration for DNS based service discovery. +type SDConfig struct { + Names []string `yaml:"names"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + Type string `yaml:"type"` + Port int `yaml:"port"` // Ignored for SRV records +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if len(c.Names) == 0 { + return fmt.Errorf("DNS-SD config must contain at least one SRV record name") + } + switch strings.ToUpper(c.Type) { + case "SRV": + case "A", "AAAA": + if c.Port == 0 { + return fmt.Errorf("a port is required in DNS-SD configs for all record types except SRV") + } + default: + return fmt.Errorf("invalid DNS-SD records type %s", c.Type) + } + return nil +} + +func init() { + prometheus.MustRegister(dnsSDLookupFailuresCount) + prometheus.MustRegister(dnsSDLookupsCount) +} + +// Discovery periodically performs DNS-SD requests. It implements +// the Discoverer interface. +type Discovery struct { + names []string + + interval time.Duration + port int + qtype uint16 + logger log.Logger +} + +// NewDiscovery returns a new Discovery which periodically refreshes its targets. +func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { + if logger == nil { + logger = log.NewNopLogger() + } + + qtype := dns.TypeSRV + switch strings.ToUpper(conf.Type) { + case "A": + qtype = dns.TypeA + case "AAAA": + qtype = dns.TypeAAAA + case "SRV": + qtype = dns.TypeSRV + } + return &Discovery{ + names: conf.Names, + interval: time.Duration(conf.RefreshInterval), + qtype: qtype, + port: conf.Port, + logger: logger, + } +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + // Get an initial set right away. 
+ d.refreshAll(ctx, ch) + + for { + select { + case <-ticker.C: + d.refreshAll(ctx, ch) + case <-ctx.Done(): + return + } + } +} + +func (d *Discovery) refreshAll(ctx context.Context, ch chan<- []*targetgroup.Group) { + var wg sync.WaitGroup + + wg.Add(len(d.names)) + for _, name := range d.names { + go func(n string) { + if err := d.refresh(ctx, n, ch); err != nil { + level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err) + } + wg.Done() + }(name) + } + + wg.Wait() +} + +func (d *Discovery) refresh(ctx context.Context, name string, ch chan<- []*targetgroup.Group) error { + response, err := lookupWithSearchPath(name, d.qtype, d.logger) + dnsSDLookupsCount.Inc() + if err != nil { + dnsSDLookupFailuresCount.Inc() + return err + } + + tg := &targetgroup.Group{} + hostPort := func(a string, p int) model.LabelValue { + return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p))) + } + + for _, record := range response.Answer { + target := model.LabelValue("") + switch addr := record.(type) { + case *dns.SRV: + // Remove the final dot from rooted DNS names to make them look more usual. + addr.Target = strings.TrimRight(addr.Target, ".") + + target = hostPort(addr.Target, int(addr.Port)) + case *dns.A: + target = hostPort(addr.A.String(), d.port) + case *dns.AAAA: + target = hostPort(addr.AAAA.String(), d.port) + default: + level.Warn(d.logger).Log("msg", "Invalid SRV record", "record", record) + continue + } + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: target, + dnsNameLabel: model.LabelValue(name), + }) + } + + tg.Source = name + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- []*targetgroup.Group{tg}: + } + + return nil +} + +// lookupWithSearchPath tries to get an answer for various permutations of +// the given name, appending the system-configured search path as necessary. +// +// There are three possible outcomes: +// +// 1. 
One of the permutations of the given name is recognised as +// "valid" by the DNS, in which case we consider ourselves "done" +// and that answer is returned. Note that, due to the way the DNS +// handles "name has resource records, but none of the specified type", +// the answer received may have an empty set of results. +// +// 2. All of the permutations of the given name are responded to by one of +// the servers in the "nameservers" list with the answer "that name does +// not exist" (NXDOMAIN). In that case, it can be considered +// pseudo-authoritative that there are no records for that name. +// +// 3. One or more of the names was responded to by all servers with some +// sort of error indication. In that case, we can't know if, in fact, +// there are records for the name or not, so whatever state the +// configuration is in, we should keep it that way until we know for +// sure (by, presumably, all the names getting answers in the future). +// +// Outcomes 1 and 2 are indicated by a valid response message (possibly an +// empty one) and no error. Outcome 3 is indicated by an error return. The +// error will be generic-looking, because trying to return all the errors +// returned by the combination of all name permutations and servers is a +// nightmare. +func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { + conf, err := dns.ClientConfigFromFile(resolvConf) + if err != nil { + return nil, fmt.Errorf("could not load resolv.conf: %s", err) + } + + allResponsesValid := true + + for _, lname := range conf.NameList(name) { + response, err := lookupFromAnyServer(lname, qtype, conf, logger) + + if err != nil { + // We can't go home yet, because a later name + // may give us a valid, successful answer. However + // we can no longer say "this name definitely doesn't + // exist", because we did not get that answer for + // at least one name. 
+ allResponsesValid = false + } else if response.Rcode == dns.RcodeSuccess { + // Outcome 1: GOLD! + return response, nil + } + } + + if allResponsesValid { + // Outcome 2: everyone says NXDOMAIN, that's good enough for me + return &dns.Msg{}, nil + } + // Outcome 3: boned. + return nil, fmt.Errorf("could not resolve %q: all servers responded with errors to at least one search domain", name) +} + +// lookupFromAnyServer uses all configured servers to try and resolve a specific +// name. If a viable answer is received from a server, then it is +// immediately returned, otherwise the other servers in the config are +// tried, and if none of them return a viable answer, an error is returned. +// +// A "viable answer" is one which indicates either: +// +// 1. "yes, I know that name, and here are its records of the requested type" +// (RCODE==SUCCESS, ANCOUNT > 0); +// 2. "yes, I know that name, but it has no records of the requested type" +// (RCODE==SUCCESS, ANCOUNT==0); or +// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN). +// +// A non-viable answer is "anything else", which encompasses both various +// system-level problems (like network timeouts) and also +// valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). +func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) { + client := &dns.Client{} + + for _, server := range conf.Servers { + servAddr := net.JoinHostPort(server, conf.Port) + msg, err := askServerForName(name, qtype, client, servAddr, true) + if err != nil { + level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err) + continue + } + + if msg.Rcode == dns.RcodeSuccess || msg.Rcode == dns.RcodeNameError { + // We have our answer. Time to go home. 
+ return msg, nil + } + } + + return nil, fmt.Errorf("could not resolve %s: no servers returned a viable answer", name) +} + +// askServerForName makes a request to a specific DNS server for a specific +// name (and qtype). Retries with TCP in the event of response truncation, +// but otherwise just sends back whatever the server gave, whether that be a +// valid-looking response, or an error. +func askServerForName(name string, queryType uint16, client *dns.Client, servAddr string, edns bool) (*dns.Msg, error) { + msg := &dns.Msg{} + + msg.SetQuestion(dns.Fqdn(name), queryType) + if edns { + msg.SetEdns0(dns.DefaultMsgSize, false) + } + + response, _, err := client.Exchange(msg, servAddr) + if err == dns.ErrTruncated { + if client.Net == "tcp" { + return nil, fmt.Errorf("got truncated message on TCP (64kiB limit exceeded?)") + } + + client.Net = "tcp" + return askServerForName(name, queryType, client, servAddr, false) + } + if err != nil { + return nil, err + } + if msg.Id != response.Id { + return nil, fmt.Errorf("DNS ID mismatch, request: %d, response: %d", msg.Id, response.Id) + } + return response, nil +} diff --git a/src/prometheus/discovery/ec2/ec2.go b/src/prometheus/discovery/ec2/ec2.go new file mode 100644 index 0000000..fb1c7ad --- /dev/null +++ b/src/prometheus/discovery/ec2/ec2.go @@ -0,0 +1,287 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ec2 + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + + "github.com/aws/aws-sdk-go/service/ec2" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + ec2Label = model.MetaLabelPrefix + "ec2_" + ec2LabelAZ = ec2Label + "availability_zone" + ec2LabelInstanceID = ec2Label + "instance_id" + ec2LabelInstanceState = ec2Label + "instance_state" + ec2LabelInstanceType = ec2Label + "instance_type" + ec2LabelPublicDNS = ec2Label + "public_dns_name" + ec2LabelPublicIP = ec2Label + "public_ip" + ec2LabelPrivateIP = ec2Label + "private_ip" + ec2LabelSubnetID = ec2Label + "subnet_id" + ec2LabelTag = ec2Label + "tag_" + ec2LabelVPCID = ec2Label + "vpc_id" + subnetSeparator = "," +) + +var ( + ec2SDRefreshFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_ec2_refresh_failures_total", + Help: "The number of EC2-SD scrape failures.", + }) + ec2SDRefreshDuration = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_ec2_refresh_duration_seconds", + Help: "The duration of a EC2-SD refresh in seconds.", + }) + // DefaultSDConfig is the default EC2 SD configuration. + DefaultSDConfig = SDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + } +) + +// Filter is the configuration for filtering EC2 instances. +type Filter struct { + Name string `yaml:"name"` + Values []string `yaml:"values"` +} + +// SDConfig is the configuration for EC2 based service discovery. 
+type SDConfig struct { + Region string `yaml:"region"` + AccessKey string `yaml:"access_key,omitempty"` + SecretKey config_util.Secret `yaml:"secret_key,omitempty"` + Profile string `yaml:"profile,omitempty"` + RoleARN string `yaml:"role_arn,omitempty"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + Port int `yaml:"port"` + Filters []*Filter `yaml:"filters"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if c.Region == "" { + sess, err := session.NewSession() + if err != nil { + return err + } + metadata := ec2metadata.New(sess) + region, err := metadata.Region() + if err != nil { + return fmt.Errorf("EC2 SD configuration requires a region") + } + c.Region = region + } + for _, f := range c.Filters { + if len(f.Values) == 0 { + return fmt.Errorf("EC2 SD configuration filter values cannot be empty") + } + } + return nil +} + +func init() { + prometheus.MustRegister(ec2SDRefreshFailuresCount) + prometheus.MustRegister(ec2SDRefreshDuration) +} + +// Discovery periodically performs EC2-SD requests. It implements +// the Discoverer interface. +type Discovery struct { + aws *aws.Config + interval time.Duration + profile string + roleARN string + port int + filters []*Filter + logger log.Logger +} + +// NewDiscovery returns a new EC2Discovery which periodically refreshes its targets. 
+func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { + creds := credentials.NewStaticCredentials(conf.AccessKey, string(conf.SecretKey), "") + if conf.AccessKey == "" && conf.SecretKey == "" { + creds = nil + } + if logger == nil { + logger = log.NewNopLogger() + } + return &Discovery{ + aws: &aws.Config{ + Region: &conf.Region, + Credentials: creds, + }, + profile: conf.Profile, + roleARN: conf.RoleARN, + filters: conf.Filters, + interval: time.Duration(conf.RefreshInterval), + port: conf.Port, + logger: logger, + } +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + // Get an initial set right away. + tg, err := d.refresh() + if err != nil { + level.Error(d.logger).Log("msg", "Refresh failed", "err", err) + } else { + select { + case ch <- []*targetgroup.Group{tg}: + case <-ctx.Done(): + return + } + } + + for { + select { + case <-ticker.C: + tg, err := d.refresh() + if err != nil { + level.Error(d.logger).Log("msg", "Refresh failed", "err", err) + continue + } + + select { + case ch <- []*targetgroup.Group{tg}: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } +} + +func (d *Discovery) refresh() (tg *targetgroup.Group, err error) { + t0 := time.Now() + defer func() { + ec2SDRefreshDuration.Observe(time.Since(t0).Seconds()) + if err != nil { + ec2SDRefreshFailuresCount.Inc() + } + }() + + sess, err := session.NewSessionWithOptions(session.Options{ + Config: *d.aws, + Profile: d.profile, + }) + if err != nil { + return nil, fmt.Errorf("could not create aws session: %s", err) + } + + var ec2s *ec2.EC2 + if d.roleARN != "" { + creds := stscreds.NewCredentials(sess, d.roleARN) + ec2s = ec2.New(sess, &aws.Config{Credentials: creds}) + } else { + ec2s = ec2.New(sess) + } + tg = &targetgroup.Group{ + Source: *d.aws.Region, + } + + var filters []*ec2.Filter + for _, f := range d.filters { + 
filters = append(filters, &ec2.Filter{ + Name: aws.String(f.Name), + Values: aws.StringSlice(f.Values), + }) + } + + input := &ec2.DescribeInstancesInput{Filters: filters} + + if err = ec2s.DescribeInstancesPages(input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool { + for _, r := range p.Reservations { + for _, inst := range r.Instances { + if inst.PrivateIpAddress == nil { + continue + } + labels := model.LabelSet{ + ec2LabelInstanceID: model.LabelValue(*inst.InstanceId), + } + labels[ec2LabelPrivateIP] = model.LabelValue(*inst.PrivateIpAddress) + addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.port)) + labels[model.AddressLabel] = model.LabelValue(addr) + + if inst.PublicIpAddress != nil { + labels[ec2LabelPublicIP] = model.LabelValue(*inst.PublicIpAddress) + labels[ec2LabelPublicDNS] = model.LabelValue(*inst.PublicDnsName) + } + + labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone) + labels[ec2LabelInstanceState] = model.LabelValue(*inst.State.Name) + labels[ec2LabelInstanceType] = model.LabelValue(*inst.InstanceType) + + if inst.VpcId != nil { + labels[ec2LabelVPCID] = model.LabelValue(*inst.VpcId) + + subnetsMap := make(map[string]struct{}) + for _, eni := range inst.NetworkInterfaces { + subnetsMap[*eni.SubnetId] = struct{}{} + } + subnets := []string{} + for k := range subnetsMap { + subnets = append(subnets, k) + } + labels[ec2LabelSubnetID] = model.LabelValue( + subnetSeparator + + strings.Join(subnets, subnetSeparator) + + subnetSeparator) + } + + for _, t := range inst.Tags { + if t == nil || t.Key == nil || t.Value == nil { + continue + } + name := strutil.SanitizeLabelName(*t.Key) + labels[ec2LabelTag+model.LabelName(name)] = model.LabelValue(*t.Value) + } + tg.Targets = append(tg.Targets, labels) + } + } + return true + }); err != nil { + return nil, fmt.Errorf("could not describe instances: %s", err) + } + return tg, nil +} diff --git a/src/prometheus/discovery/file/file.go 
b/src/prometheus/discovery/file/file.go new file mode 100644 index 0000000..be63378 --- /dev/null +++ b/src/prometheus/discovery/file/file.go @@ -0,0 +1,409 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package file + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "gopkg.in/fsnotify/fsnotify.v1" + "gopkg.in/yaml.v2" +) + +var ( + patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) + + // DefaultSDConfig is the default file SD configuration. + DefaultSDConfig = SDConfig{ + RefreshInterval: model.Duration(5 * time.Minute), + } +) + +// SDConfig is the configuration for file based discovery. +type SDConfig struct { + Files []string `yaml:"files"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if len(c.Files) == 0 { + return fmt.Errorf("file service discovery config must contain at least one path name") + } + for _, name := range c.Files { + if !patFileSDName.MatchString(name) { + return fmt.Errorf("path name %q is not valid for file discovery", name) + } + } + return nil +} + +const fileSDFilepathLabel = model.MetaLabelPrefix + "filepath" + +// TimestampCollector is a Custom Collector for Timestamps of the files. +type TimestampCollector struct { + Description *prometheus.Desc + discoverers map[*Discovery]struct{} + lock sync.RWMutex +} + +// Describe method sends the description to the channel. +func (t *TimestampCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- t.Description +} + +// Collect creates constant metrics for each file with last modified time of the file. +func (t *TimestampCollector) Collect(ch chan<- prometheus.Metric) { + // New map to dedup filenames. + uniqueFiles := make(map[string]float64) + t.lock.RLock() + for fileSD := range t.discoverers { + fileSD.lock.RLock() + for filename, timestamp := range fileSD.timestamps { + uniqueFiles[filename] = timestamp + } + fileSD.lock.RUnlock() + } + t.lock.RUnlock() + for filename, timestamp := range uniqueFiles { + ch <- prometheus.MustNewConstMetric( + t.Description, + prometheus.GaugeValue, + timestamp, + filename, + ) + } +} + +func (t *TimestampCollector) addDiscoverer(disc *Discovery) { + t.lock.Lock() + t.discoverers[disc] = struct{}{} + t.lock.Unlock() +} + +func (t *TimestampCollector) removeDiscoverer(disc *Discovery) { + t.lock.Lock() + delete(t.discoverers, disc) + t.lock.Unlock() +} + +// NewTimestampCollector creates a TimestampCollector. 
+func NewTimestampCollector() *TimestampCollector { + return &TimestampCollector{ + Description: prometheus.NewDesc( + "prometheus_sd_file_mtime_seconds", + "Timestamp (mtime) of files read by FileSD. Timestamp is set at read time.", + []string{"filename"}, + nil, + ), + discoverers: make(map[*Discovery]struct{}), + } +} + +var ( + fileSDScanDuration = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_file_scan_duration_seconds", + Help: "The duration of the File-SD scan in seconds.", + }) + fileSDReadErrorsCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }) + fileSDTimeStamp = NewTimestampCollector() +) + +func init() { + prometheus.MustRegister(fileSDScanDuration) + prometheus.MustRegister(fileSDReadErrorsCount) + prometheus.MustRegister(fileSDTimeStamp) +} + +// Discovery provides service discovery functionality based +// on files that contain target groups in JSON or YAML format. Refreshing +// happens using file watches and periodic refreshes. +type Discovery struct { + paths []string + watcher *fsnotify.Watcher + interval time.Duration + timestamps map[string]float64 + lock sync.RWMutex + + // lastRefresh stores which files were found during the last refresh + // and how many target groups they contained. + // This is used to detect deleted target groups. + lastRefresh map[string]int + logger log.Logger +} + +// NewDiscovery returns a new file discovery for the given paths. +func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { + if logger == nil { + logger = log.NewNopLogger() + } + + disc := &Discovery{ + paths: conf.Files, + interval: time.Duration(conf.RefreshInterval), + timestamps: make(map[string]float64), + logger: logger, + } + fileSDTimeStamp.addDiscoverer(disc) + return disc +} + +// listFiles returns a list of all files that match the configured patterns. 
+func (d *Discovery) listFiles() []string { + var paths []string + for _, p := range d.paths { + files, err := filepath.Glob(p) + if err != nil { + level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err) + continue + } + paths = append(paths, files...) + } + return paths +} + +// watchFiles sets watches on all full paths or directories that were configured for +// this file discovery. +func (d *Discovery) watchFiles() { + if d.watcher == nil { + panic("no watcher configured") + } + for _, p := range d.paths { + if idx := strings.LastIndex(p, "/"); idx > -1 { + p = p[:idx] + } else { + p = "./" + } + if err := d.watcher.Add(p); err != nil { + level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err) + } + } +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + return + } + d.watcher = watcher + defer d.stop() + + d.refresh(ctx, ch) + + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + + case event := <-d.watcher.Events: + // fsnotify sometimes sends a bunch of events without name or operation. + // It's unclear what they are and why they are sent - filter them out. + if len(event.Name) == 0 { + break + } + // Everything but a chmod requires rereading. + if event.Op^fsnotify.Chmod == 0 { + break + } + // Changes to a file can spawn various sequences of events with + // different combinations of operations. For all practical purposes + // this is inaccurate. + // The most reliable solution is to reload everything if anything happens. + d.refresh(ctx, ch) + + case <-ticker.C: + // Setting a new watch after an update might fail. Make sure we don't lose + // those files forever. 
+ d.refresh(ctx, ch) + + case err := <-d.watcher.Errors: + if err != nil { + level.Error(d.logger).Log("msg", "Error watching file", "err", err) + } + } + } +} + +func (d *Discovery) writeTimestamp(filename string, timestamp float64) { + d.lock.Lock() + d.timestamps[filename] = timestamp + d.lock.Unlock() +} + +func (d *Discovery) deleteTimestamp(filename string) { + d.lock.Lock() + delete(d.timestamps, filename) + d.lock.Unlock() +} + +// stop shuts down the file watcher. +func (d *Discovery) stop() { + level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) + + done := make(chan struct{}) + defer close(done) + + fileSDTimeStamp.removeDiscoverer(d) + + // Closing the watcher will deadlock unless all events and errors are drained. + go func() { + for { + select { + case <-d.watcher.Errors: + case <-d.watcher.Events: + // Drain all events and errors. + case <-done: + return + } + } + }() + if err := d.watcher.Close(); err != nil { + level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) + } + + level.Debug(d.logger).Log("msg", "File discovery stopped") +} + +// refresh reads all files matching the discovery's patterns and sends the respective +// updated target groups through the channel. +func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) { + t0 := time.Now() + defer func() { + fileSDScanDuration.Observe(time.Since(t0).Seconds()) + }() + ref := map[string]int{} + for _, p := range d.listFiles() { + tgroups, err := d.readFile(p) + if err != nil { + fileSDReadErrorsCount.Inc() + + level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) + // Prevent deletion down below. + ref[p] = d.lastRefresh[p] + continue + } + select { + case ch <- tgroups: + case <-ctx.Done(): + return + } + + ref[p] = len(tgroups) + } + // Send empty updates for sources that disappeared. 
+ for f, n := range d.lastRefresh { + m, ok := ref[f] + if !ok || n > m { + level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f) + d.deleteTimestamp(f) + for i := m; i < n; i++ { + select { + case ch <- []*targetgroup.Group{{Source: fileSource(f, i)}}: + case <-ctx.Done(): + return + } + } + } + } + d.lastRefresh = ref + + d.watchFiles() +} + +// readFile reads a JSON or YAML list of targets groups from the file, depending on its +// file extension. It returns full configuration target groups. +func (d *Discovery) readFile(filename string) ([]*targetgroup.Group, error) { + fd, err := os.Open(filename) + if err != nil { + return nil, err + } + defer fd.Close() + + content, err := ioutil.ReadAll(fd) + if err != nil { + return nil, err + } + + info, err := fd.Stat() + if err != nil { + return nil, err + } + + var targetGroups []*targetgroup.Group + + switch ext := filepath.Ext(filename); strings.ToLower(ext) { + case ".json": + if err := json.Unmarshal(content, &targetGroups); err != nil { + return nil, err + } + case ".yml", ".yaml": + if err := yaml.UnmarshalStrict(content, &targetGroups); err != nil { + return nil, err + } + default: + panic(fmt.Errorf("discovery.File.readFile: unhandled file extension %q", ext)) + } + + for i, tg := range targetGroups { + if tg == nil { + err = errors.New("nil target group item found") + return nil, err + } + + tg.Source = fileSource(filename, i) + if tg.Labels == nil { + tg.Labels = model.LabelSet{} + } + tg.Labels[fileSDFilepathLabel] = model.LabelValue(filename) + } + + d.writeTimestamp(filename, float64(info.ModTime().Unix())) + + return targetGroups, nil +} + +// fileSource returns a source ID for the i-th target group in the file. 
+func fileSource(filename string, i int) string { + return fmt.Sprintf("%s:%d", filename, i) +} diff --git a/src/prometheus/discovery/file/file_test.go b/src/prometheus/discovery/file/file_test.go new file mode 100644 index 0000000..16d7977 --- /dev/null +++ b/src/prometheus/discovery/file/file_test.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package file + +import ( + "context" + "io" + "os" + "path/filepath" + "testing" + "time" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const testDir = "fixtures" + +func TestFileSD(t *testing.T) { + defer os.Remove(filepath.Join(testDir, "_test_valid.yml")) + defer os.Remove(filepath.Join(testDir, "_test_valid.json")) + defer os.Remove(filepath.Join(testDir, "_test_invalid_nil.json")) + defer os.Remove(filepath.Join(testDir, "_test_invalid_nil.yml")) + testFileSD(t, "valid", ".yml", true) + testFileSD(t, "valid", ".json", true) + testFileSD(t, "invalid_nil", ".json", false) + testFileSD(t, "invalid_nil", ".yml", false) +} + +func testFileSD(t *testing.T, prefix, ext string, expect bool) { + // As interval refreshing is more of a fallback, we only want to test + // whether file watches work as expected. 
+ var conf SDConfig + conf.Files = []string{filepath.Join(testDir, "_*"+ext)} + conf.RefreshInterval = model.Duration(1 * time.Hour) + + var ( + fsd = NewDiscovery(&conf, nil) + ch = make(chan []*targetgroup.Group) + ctx, cancel = context.WithCancel(context.Background()) + ) + go fsd.Run(ctx, ch) + + select { + case <-time.After(25 * time.Millisecond): + // Expected. + case tgs := <-ch: + t.Fatalf("Unexpected target groups in file discovery: %s", tgs) + } + + // To avoid empty group struct sent from the discovery caused by invalid fsnotify updates, + // drain the channel until we are ready with the test files. + fileReady := make(chan struct{}) + drainReady := make(chan struct{}) + go func() { + for { + select { + case <-ch: + case <-fileReady: + close(drainReady) + return + } + } + }() + + newf, err := os.Create(filepath.Join(testDir, "_test_"+prefix+ext)) + if err != nil { + t.Fatal(err) + } + defer newf.Close() + + f, err := os.Open(filepath.Join(testDir, prefix+ext)) + if err != nil { + t.Fatal(err) + } + defer f.Close() + _, err = io.Copy(newf, f) + if err != nil { + t.Fatal(err) + } + + // Test file is ready so stop draining the discovery channel. + // It contains two target groups. + close(fileReady) + <-drainReady + newf.WriteString(" ") // One last meaningless write to trigger fsnotify and a new loop of the discovery service. + + timeout := time.After(15 * time.Second) +retry: + for { + select { + case <-timeout: + if expect { + t.Fatalf("Expected new target group but got none") + } else { + // Invalid type fsd should always break down. + break retry + } + case tgs := <-ch: + if !expect { + t.Fatalf("Unexpected target groups %s, we expected a failure here.", tgs) + } + + if len(tgs) != 2 { + continue retry // Potentially a partial write, just retry. 
+ } + tg := tgs[0] + + if _, ok := tg.Labels["foo"]; !ok { + t.Fatalf("Label not parsed") + } + if tg.String() != filepath.Join(testDir, "_test_"+prefix+ext+":0") { + t.Fatalf("Unexpected target group %s", tg) + } + + tg = tgs[1] + if tg.String() != filepath.Join(testDir, "_test_"+prefix+ext+":1") { + t.Fatalf("Unexpected target groups %s", tg) + } + break retry + } + } + + // Based on unknown circumstances, sometimes fsnotify will trigger more events in + // some runs (which might be empty, chains of different operations etc.). + // We have to drain those (as the target manager would) to avoid deadlocking and must + // not try to make sense of it all... + drained := make(chan struct{}) + go func() { + for { + select { + case tgs := <-ch: + // Below we will change the file to a bad syntax. Previously extracted target + // groups must not be deleted via sending an empty target group. + if len(tgs[0].Targets) == 0 { + t.Errorf("Unexpected empty target groups received: %s", tgs) + } + case <-time.After(500 * time.Millisecond): + close(drained) + return + } + } + }() + + newf, err = os.Create(filepath.Join(testDir, "_test.new")) + if err != nil { + t.Fatal(err) + } + defer os.Remove(newf.Name()) + + if _, err := newf.Write([]byte("]gibberish\n][")); err != nil { + t.Fatal(err) + } + newf.Close() + + os.Rename(newf.Name(), filepath.Join(testDir, "_test_"+prefix+ext)) + + cancel() + <-drained +} diff --git a/src/prometheus/discovery/file/fixtures/invalid_nil.json b/src/prometheus/discovery/file/fixtures/invalid_nil.json new file mode 100644 index 0000000..0534ba4 --- /dev/null +++ b/src/prometheus/discovery/file/fixtures/invalid_nil.json @@ -0,0 +1,9 @@ +[ + { + "targets": ["localhost:9090", "example.org:443"], + "labels": { + "foo": "bar" + } + }, + null +] diff --git a/src/prometheus/discovery/file/fixtures/invalid_nil.yml b/src/prometheus/discovery/file/fixtures/invalid_nil.yml new file mode 100644 index 0000000..7618572 --- /dev/null +++ 
b/src/prometheus/discovery/file/fixtures/invalid_nil.yml @@ -0,0 +1,5 @@ +- targets: ['localhost:9090', 'example.org:443'] + labels: + foo: bar + +- null diff --git a/src/prometheus/discovery/file/fixtures/valid.json b/src/prometheus/discovery/file/fixtures/valid.json new file mode 100644 index 0000000..df4f0df --- /dev/null +++ b/src/prometheus/discovery/file/fixtures/valid.json @@ -0,0 +1,11 @@ +[ + { + "targets": ["localhost:9090", "example.org:443"], + "labels": { + "foo": "bar" + } + }, + { + "targets": ["my.domain"] + } +] diff --git a/src/prometheus/discovery/file/fixtures/valid.yml b/src/prometheus/discovery/file/fixtures/valid.yml new file mode 100644 index 0000000..5a4e6e5 --- /dev/null +++ b/src/prometheus/discovery/file/fixtures/valid.yml @@ -0,0 +1,5 @@ +- targets: ['localhost:9090', 'example.org:443'] + labels: + foo: bar + +- targets: ['my.domain'] diff --git a/src/prometheus/discovery/gce/gce.go b/src/prometheus/discovery/gce/gce.go new file mode 100644 index 0000000..b92336d --- /dev/null +++ b/src/prometheus/discovery/gce/gce.go @@ -0,0 +1,265 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gce + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" + + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + gceLabel = model.MetaLabelPrefix + "gce_" + gceLabelProject = gceLabel + "project" + gceLabelZone = gceLabel + "zone" + gceLabelNetwork = gceLabel + "network" + gceLabelSubnetwork = gceLabel + "subnetwork" + gceLabelPublicIP = gceLabel + "public_ip" + gceLabelPrivateIP = gceLabel + "private_ip" + gceLabelInstanceName = gceLabel + "instance_name" + gceLabelInstanceStatus = gceLabel + "instance_status" + gceLabelTags = gceLabel + "tags" + gceLabelMetadata = gceLabel + "metadata_" + gceLabelLabel = gceLabel + "label_" + gceLabelMachineType = gceLabel + "machine_type" +) + +var ( + gceSDRefreshFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_gce_refresh_failures_total", + Help: "The number of GCE-SD refresh failures.", + }) + gceSDRefreshDuration = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_gce_refresh_duration", + Help: "The duration of a GCE-SD refresh in seconds.", + }) + // DefaultSDConfig is the default GCE SD configuration. + DefaultSDConfig = SDConfig{ + Port: 80, + TagSeparator: ",", + RefreshInterval: model.Duration(60 * time.Second), + } +) + +// SDConfig is the configuration for GCE based service discovery. +type SDConfig struct { + // Project: The Google Cloud Project ID + Project string `yaml:"project"` + + // Zone: The zone of the scrape targets. + // If you need to configure multiple zones use multiple gce_sd_configs + Zone string `yaml:"zone"` + + // Filter: Can be used optionally to filter the instance list by other criteria. 
+ // Syntax of this filter string is described here in the filter query parameter section: + // https://cloud.google.com/compute/docs/reference/latest/instances/list + Filter string `yaml:"filter,omitempty"` + + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + Port int `yaml:"port"` + TagSeparator string `yaml:"tag_separator,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if c.Project == "" { + return fmt.Errorf("GCE SD configuration requires a project") + } + if c.Zone == "" { + return fmt.Errorf("GCE SD configuration requires a zone") + } + return nil +} + +func init() { + prometheus.MustRegister(gceSDRefreshFailuresCount) + prometheus.MustRegister(gceSDRefreshDuration) +} + +// Discovery periodically performs GCE-SD requests. It implements +// the Discoverer interface. +type Discovery struct { + project string + zone string + filter string + client *http.Client + svc *compute.Service + isvc *compute.InstancesService + interval time.Duration + port int + tagSeparator string + logger log.Logger +} + +// NewDiscovery returns a new Discovery which periodically refreshes its targets. 
+func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { + if logger == nil { + logger = log.NewNopLogger() + } + gd := &Discovery{ + project: conf.Project, + zone: conf.Zone, + filter: conf.Filter, + interval: time.Duration(conf.RefreshInterval), + port: conf.Port, + tagSeparator: conf.TagSeparator, + logger: logger, + } + var err error + gd.client, err = google.DefaultClient(oauth2.NoContext, compute.ComputeReadonlyScope) + if err != nil { + return nil, fmt.Errorf("error setting up communication with GCE service: %s", err) + } + gd.svc, err = compute.New(gd.client) + if err != nil { + return nil, fmt.Errorf("error setting up communication with GCE service: %s", err) + } + gd.isvc = compute.NewInstancesService(gd.svc) + return gd, nil +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + // Get an initial set right away. + tg, err := d.refresh() + if err != nil { + level.Error(d.logger).Log("msg", "Refresh failed", "err", err) + } else { + select { + case ch <- []*targetgroup.Group{tg}: + case <-ctx.Done(): + } + } + + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + tg, err := d.refresh() + if err != nil { + level.Error(d.logger).Log("msg", "Refresh failed", "err", err) + continue + } + select { + case ch <- []*targetgroup.Group{tg}: + case <-ctx.Done(): + } + case <-ctx.Done(): + return + } + } +} + +func (d *Discovery) refresh() (tg *targetgroup.Group, err error) { + t0 := time.Now() + defer func() { + gceSDRefreshDuration.Observe(time.Since(t0).Seconds()) + if err != nil { + gceSDRefreshFailuresCount.Inc() + } + }() + + tg = &targetgroup.Group{ + Source: fmt.Sprintf("GCE_%s_%s", d.project, d.zone), + } + + ilc := d.isvc.List(d.project, d.zone) + if len(d.filter) > 0 { + ilc = ilc.Filter(d.filter) + } + err = ilc.Pages(context.TODO(), func(l *compute.InstanceList) error { + for _, inst := range l.Items { + if 
len(inst.NetworkInterfaces) == 0 { + continue + } + labels := model.LabelSet{ + gceLabelProject: model.LabelValue(d.project), + gceLabelZone: model.LabelValue(inst.Zone), + gceLabelInstanceName: model.LabelValue(inst.Name), + gceLabelInstanceStatus: model.LabelValue(inst.Status), + gceLabelMachineType: model.LabelValue(inst.MachineType), + } + priIface := inst.NetworkInterfaces[0] + labels[gceLabelNetwork] = model.LabelValue(priIface.Network) + labels[gceLabelSubnetwork] = model.LabelValue(priIface.Subnetwork) + labels[gceLabelPrivateIP] = model.LabelValue(priIface.NetworkIP) + addr := fmt.Sprintf("%s:%d", priIface.NetworkIP, d.port) + labels[model.AddressLabel] = model.LabelValue(addr) + + // Tags in GCE are usually only used for networking rules. + if inst.Tags != nil && len(inst.Tags.Items) > 0 { + // We surround the separated list with the separator as well. This way regular expressions + // in relabeling rules don't have to consider tag positions. + tags := d.tagSeparator + strings.Join(inst.Tags.Items, d.tagSeparator) + d.tagSeparator + labels[gceLabelTags] = model.LabelValue(tags) + } + + // GCE metadata are key-value pairs for user supplied attributes. + if inst.Metadata != nil { + for _, i := range inst.Metadata.Items { + // Protect against occasional nil pointers. 
+ if i.Value == nil { + continue + } + name := strutil.SanitizeLabelName(i.Key) + labels[gceLabelMetadata+model.LabelName(name)] = model.LabelValue(*i.Value) + } + } + + // GCE labels are key-value pairs that group associated resources + if inst.Labels != nil { + for key, value := range inst.Labels { + name := strutil.SanitizeLabelName(key) + labels[gceLabelLabel+model.LabelName(name)] = model.LabelValue(value) + } + } + + if len(priIface.AccessConfigs) > 0 { + ac := priIface.AccessConfigs[0] + if ac.Type == "ONE_TO_ONE_NAT" { + labels[gceLabelPublicIP] = model.LabelValue(ac.NatIP) + } + } + tg.Targets = append(tg.Targets, labels) + } + return nil + }) + if err != nil { + return tg, fmt.Errorf("error retrieving refresh targets from gce: %s", err) + } + return tg, nil +} diff --git a/src/prometheus/discovery/kubernetes/endpoints.go b/src/prometheus/discovery/kubernetes/endpoints.go new file mode 100644 index 0000000..a9b041d --- /dev/null +++ b/src/prometheus/discovery/kubernetes/endpoints.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + apiv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +// Endpoints discovers new endpoint targets. +type Endpoints struct { + logger log.Logger + + endpointsInf cache.SharedInformer + serviceInf cache.SharedInformer + podInf cache.SharedInformer + + podStore cache.Store + endpointsStore cache.Store + serviceStore cache.Store + + queue *workqueue.Type +} + +// NewEndpoints returns a new endpoints discovery. +func NewEndpoints(l log.Logger, svc, eps, pod cache.SharedInformer) *Endpoints { + if l == nil { + l = log.NewNopLogger() + } + e := &Endpoints{ + logger: l, + endpointsInf: eps, + endpointsStore: eps.GetStore(), + serviceInf: svc, + serviceStore: svc.GetStore(), + podInf: pod, + podStore: pod.GetStore(), + queue: workqueue.NewNamed("endpoints"), + } + + e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(o interface{}) { + eventCount.WithLabelValues("endpoints", "add").Inc() + e.enqueue(o) + }, + UpdateFunc: func(_, o interface{}) { + eventCount.WithLabelValues("endpoints", "update").Inc() + e.enqueue(o) + }, + DeleteFunc: func(o interface{}) { + eventCount.WithLabelValues("endpoints", "delete").Inc() + e.enqueue(o) + }, + }) + + serviceUpdate := func(o interface{}) { + svc, err := convertToService(o) + if err != nil { + level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + return + } + + ep := &apiv1.Endpoints{} + ep.Namespace = svc.Namespace + ep.Name = svc.Name + obj, exists, err := e.endpointsStore.Get(ep) + if exists && err == nil { + e.enqueue(obj.(*apiv1.Endpoints)) + } + + if err != nil { + level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err) + } + } + 
e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + // TODO(fabxc): potentially remove add and delete event handlers. Those should + // be triggered via the endpoint handlers already. + AddFunc: func(o interface{}) { + eventCount.WithLabelValues("service", "add").Inc() + serviceUpdate(o) + }, + UpdateFunc: func(_, o interface{}) { + eventCount.WithLabelValues("service", "update").Inc() + serviceUpdate(o) + }, + DeleteFunc: func(o interface{}) { + eventCount.WithLabelValues("service", "delete").Inc() + serviceUpdate(o) + }, + }) + + return e +} + +func (e *Endpoints) enqueue(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return + } + + e.queue.Add(key) +} + +// Run implements the Discoverer interface. +func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + defer e.queue.ShutDown() + + if !cache.WaitForCacheSync(ctx.Done(), e.endpointsInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced) { + level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache") + return + } + + go func() { + for e.process(ctx, ch) { + } + }() + + // Block until the target provider is explicitly canceled. 
+ <-ctx.Done() +} + +func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { + keyObj, quit := e.queue.Get() + if quit { + return false + } + defer e.queue.Done(keyObj) + key := keyObj.(string) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + level.Error(e.logger).Log("msg", "spliting key failed", "key", key) + return true + } + + o, exists, err := e.endpointsStore.GetByKey(key) + if err != nil { + level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + return true + } + if !exists { + send(ctx, e.logger, RoleEndpoint, ch, &targetgroup.Group{Source: endpointsSourceFromNamespaceAndName(namespace, name)}) + return true + } + eps, err := convertToEndpoints(o) + if err != nil { + level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err) + return true + } + send(ctx, e.logger, RoleEndpoint, ch, e.buildEndpoints(eps)) + return true +} + +func convertToEndpoints(o interface{}) (*apiv1.Endpoints, error) { + endpoints, ok := o.(*apiv1.Endpoints) + if ok { + return endpoints, nil + } + + return nil, fmt.Errorf("Received unexpected object: %v", o) +} + +func endpointsSource(ep *apiv1.Endpoints) string { + return endpointsSourceFromNamespaceAndName(ep.Namespace, ep.Name) +} + +func endpointsSourceFromNamespaceAndName(namespace, name string) string { + return "endpoints/" + namespace + "/" + name +} + +const ( + endpointsNameLabel = metaLabelPrefix + "endpoints_name" + endpointReadyLabel = metaLabelPrefix + "endpoint_ready" + endpointPortNameLabel = metaLabelPrefix + "endpoint_port_name" + endpointPortProtocolLabel = metaLabelPrefix + "endpoint_port_protocol" + endpointAddressTargetKindLabel = metaLabelPrefix + "endpoint_address_target_kind" + endpointAddressTargetNameLabel = metaLabelPrefix + "endpoint_address_target_name" +) + +func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { + tg := &targetgroup.Group{ + Source: 
endpointsSource(eps), + } + tg.Labels = model.LabelSet{ + namespaceLabel: lv(eps.Namespace), + endpointsNameLabel: lv(eps.Name), + } + e.addServiceLabels(eps.Namespace, eps.Name, tg) + + type podEntry struct { + pod *apiv1.Pod + servicePorts []apiv1.EndpointPort + } + seenPods := map[string]*podEntry{} + + add := func(addr apiv1.EndpointAddress, port apiv1.EndpointPort, ready string) { + a := net.JoinHostPort(addr.IP, strconv.FormatUint(uint64(port.Port), 10)) + + target := model.LabelSet{ + model.AddressLabel: lv(a), + endpointPortNameLabel: lv(port.Name), + endpointPortProtocolLabel: lv(string(port.Protocol)), + endpointReadyLabel: lv(ready), + } + + if addr.TargetRef != nil { + target[model.LabelName(endpointAddressTargetKindLabel)] = lv(addr.TargetRef.Kind) + target[model.LabelName(endpointAddressTargetNameLabel)] = lv(addr.TargetRef.Name) + } + + pod := e.resolvePodRef(addr.TargetRef) + if pod == nil { + // This target is not a Pod, so don't continue with Pod specific logic. + tg.Targets = append(tg.Targets, target) + return + } + s := pod.Namespace + "/" + pod.Name + + sp, ok := seenPods[s] + if !ok { + sp = &podEntry{pod: pod} + seenPods[s] = sp + } + + // Attach standard pod labels. + target = target.Merge(podLabels(pod)) + + // Attach potential container port labels matching the endpoint port. + for _, c := range pod.Spec.Containers { + for _, cport := range c.Ports { + if port.Port == cport.ContainerPort { + ports := strconv.FormatUint(uint64(port.Port), 10) + + target[podContainerNameLabel] = lv(c.Name) + target[podContainerPortNameLabel] = lv(cport.Name) + target[podContainerPortNumberLabel] = lv(ports) + target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) + break + } + } + } + + // Add service port so we know that we have already generated a target + // for it. 
+ sp.servicePorts = append(sp.servicePorts, port) + tg.Targets = append(tg.Targets, target) + } + + for _, ss := range eps.Subsets { + for _, port := range ss.Ports { + for _, addr := range ss.Addresses { + add(addr, port, "true") + } + // Although this generates the same target again, as it was generated in + // the loop above, it causes the ready meta label to be overridden. + for _, addr := range ss.NotReadyAddresses { + add(addr, port, "false") + } + } + } + + // For all seen pods, check all container ports. If they were not covered + // by one of the service endpoints, generate targets for them. + for _, pe := range seenPods { + for _, c := range pe.pod.Spec.Containers { + for _, cport := range c.Ports { + hasSeenPort := func() bool { + for _, eport := range pe.servicePorts { + if cport.ContainerPort == eport.Port { + return true + } + } + return false + } + if hasSeenPort() { + continue + } + + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) + } + } + } + + return tg +} + +func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { + if ref == nil || ref.Kind != "Pod" { + return nil + } + p := &apiv1.Pod{} + p.Namespace = ref.Namespace + p.Name = ref.Name + + obj, exists, err := e.podStore.Get(p) + if err != nil || !exists { + return nil + } + if err != nil { + level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + } + return obj.(*apiv1.Pod) +} + +func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { + svc := &apiv1.Service{} + svc.Namespace = ns + svc.Name = name + + obj, exists, 
err := e.serviceStore.Get(svc) + if !exists || err != nil { + return + } + if err != nil { + level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + } + svc = obj.(*apiv1.Service) + + tg.Labels = tg.Labels.Merge(serviceLabels(svc)) +} diff --git a/src/prometheus/discovery/kubernetes/endpoints_test.go b/src/prometheus/discovery/kubernetes/endpoints_test.go new file mode 100644 index 0000000..35dd5b5 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/endpoints_test.go @@ -0,0 +1,605 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/pkg/api/v1" +) + +func makeEndpoints() *v1.Endpoints { + return &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "1.2.3.4", + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Addresses: []v1.EndpointAddress{ + { + IP: "2.3.4.5", + }, + }, + NotReadyAddresses: []v1.EndpointAddress{ + { + IP: "2.3.4.5", + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9001, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + } +} + +func TestEndpointsDiscoveryBeforeRun(t *testing.T) { + n, c, w := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}) + + k8sDiscoveryTest{ + discovery: n, + beforeRun: func() { + obj := makeEndpoints() + c.CoreV1().Endpoints(obj.Namespace).Create(obj) + w.Endpoints().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:9000", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "false", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": 
"default", + "__meta_kubernetes_endpoints_name": "testendpoints", + }, + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoveryAdd(t *testing.T) { + obj := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "c1", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Name: "c2", + Ports: []v1.ContainerPort{ + { + Name: "sideport", + ContainerPort: 9001, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "1.2.3.4", + }, + } + n, c, w := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, obj) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "default", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + } + c.CoreV1().Endpoints(obj.Namespace).Create(obj) + w.Endpoints().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_ip": "1.2.3.4", + "__meta_kubernetes_pod_ready": "unknown", + 
"__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_uid": "deadbeef", + }, + { + "__address__": "1.2.3.4:9001", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_ip": "1.2.3.4", + "__meta_kubernetes_pod_ready": "unknown", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_container_name": "c2", + "__meta_kubernetes_pod_container_port_name": "sideport", + "__meta_kubernetes_pod_container_port_number": "9001", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_uid": "deadbeef", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_namespace": "default", + }, + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoveryDelete(t *testing.T) { + n, c, w := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := makeEndpoints() + c.CoreV1().Endpoints(obj.Namespace).Delete(obj.Name, &metav1.DeleteOptions{}) + w.Endpoints().Delete(obj) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoveryUpdate(t *testing.T) { + n, c, w := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: 
"1.2.3.4", + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + { + Addresses: []v1.EndpointAddress{ + { + IP: "2.3.4.5", + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9001, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + } + c.CoreV1().Endpoints(obj.Namespace).Update(obj) + w.Endpoints().Modify(obj) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:9000", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + }, + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoveryEmptySubsets(t *testing.T) { + n, c, w := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + }, + Subsets: []v1.EndpointSubset{}, + } + c.CoreV1().Endpoints(obj.Namespace).Update(obj) + w.Endpoints().Modify(obj) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + }, + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoveryWithService(t *testing.T) { + n, c, w := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, 
makeEndpoints()) + + k8sDiscoveryTest{ + discovery: n, + beforeRun: func() { + obj := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + Labels: map[string]string{ + "app": "test", + }, + }, + } + c.CoreV1().Services(obj.Namespace).Create(obj) + w.Services().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:9000", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "false", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app": "test", + "__meta_kubernetes_service_name": "testendpoints", + }, + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { + n, c, w := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) + + k8sDiscoveryTest{ + discovery: n, + beforeRun: func() { + obj := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + Labels: map[string]string{ + "app": "test", + }, + }, + } + c.CoreV1().Services(obj.Namespace).Create(obj) + w.Services().Add(obj) + }, + afterStart: func() { + obj := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "default", + Labels: map[string]string{ + "app": "svc", + "component": "testing", + }, + 
}, + } + c.CoreV1().Services(obj.Namespace).Update(obj) + w.Services().Modify(obj) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/default/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:9000", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "false", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app": "svc", + "__meta_kubernetes_service_name": "testendpoints", + "__meta_kubernetes_service_label_component": "testing", + }, + Source: "endpoints/default/testendpoints", + }, + }, + }.Run(t) +} + +func TestEndpointsDiscoveryNamespaces(t *testing.T) { + epOne := makeEndpoints() + epOne.Namespace = "ns1" + objs := []runtime.Object{ + epOne, + &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "ns2", + }, + Subsets: []v1.EndpointSubset{ + { + Addresses: []v1.EndpointAddress{ + { + IP: "4.3.2.1", + TargetRef: &v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "ns2", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testendpoints", + Namespace: "ns1", + Labels: map[string]string{ + "app": "app1", + }, + }, + }, + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "ns2", + UID: 
types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "c1", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "2.3.4.5", + PodIP: "4.3.2.1", + }, + }, + } + n, _, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, objs...) + + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "endpoints/ns1/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:9000", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, + { + "__address__": "2.3.4.5:9001", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "false", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": "ns1", + "__meta_kubernetes_endpoints_name": "testendpoints", + "__meta_kubernetes_service_label_app": "app1", + "__meta_kubernetes_service_name": "testendpoints", + }, + Source: "endpoints/ns1/testendpoints", + }, + "endpoints/ns2/testendpoints": { + Targets: []model.LabelSet{ + { + "__address__": "4.3.2.1:9000", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_endpoint_address_target_kind": "Pod", + "__meta_kubernetes_endpoint_address_target_name": "testpod", + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_pod_ip": "4.3.2.1", + "__meta_kubernetes_pod_ready": "unknown", + 
"__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_container_name": "c1", + "__meta_kubernetes_pod_container_port_name": "mainport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + "__meta_kubernetes_pod_uid": "deadbeef", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_namespace": "ns2", + "__meta_kubernetes_endpoints_name": "testendpoints", + }, + Source: "endpoints/ns2/testendpoints", + }, + }, + }.Run(t) +} diff --git a/src/prometheus/discovery/kubernetes/ingress.go b/src/prometheus/discovery/kubernetes/ingress.go new file mode 100644 index 0000000..5925502 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/ingress.go @@ -0,0 +1,197 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +// Ingress implements discovery of Kubernetes ingresss. 
+type Ingress struct { + logger log.Logger + informer cache.SharedInformer + store cache.Store + queue *workqueue.Type +} + +// NewIngress returns a new ingress discovery. +func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress { + s := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")} + s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(o interface{}) { + eventCount.WithLabelValues("ingress", "add").Inc() + s.enqueue(o) + }, + DeleteFunc: func(o interface{}) { + eventCount.WithLabelValues("ingress", "delete").Inc() + s.enqueue(o) + }, + UpdateFunc: func(_, o interface{}) { + eventCount.WithLabelValues("ingress", "update").Inc() + s.enqueue(o) + }, + }) + return s +} + +func (e *Ingress) enqueue(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return + } + + e.queue.Add(key) +} + +// Run implements the Discoverer interface. +func (s *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + defer s.queue.ShutDown() + + if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { + level.Error(s.logger).Log("msg", "ingress informer unable to sync cache") + return + } + + go func() { + for s.process(ctx, ch) { + } + }() + + // Block until the target provider is explicitly canceled. 
+ <-ctx.Done() +} + +func (s *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { + keyObj, quit := s.queue.Get() + if quit { + return false + } + defer s.queue.Done(keyObj) + key := keyObj.(string) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return true + } + + o, exists, err := s.store.GetByKey(key) + if err != nil { + return true + } + if !exists { + send(ctx, s.logger, RoleIngress, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)}) + return true + } + eps, err := convertToIngress(o) + if err != nil { + level.Error(s.logger).Log("msg", "converting to Ingress object failed", "err", err) + return true + } + send(ctx, s.logger, RoleIngress, ch, s.buildIngress(eps)) + return true +} + +func convertToIngress(o interface{}) (*v1beta1.Ingress, error) { + ingress, ok := o.(*v1beta1.Ingress) + if ok { + return ingress, nil + } + + return nil, fmt.Errorf("Received unexpected object: %v", o) +} + +func ingressSource(s *v1beta1.Ingress) string { + return ingressSourceFromNamespaceAndName(s.Namespace, s.Name) +} + +func ingressSourceFromNamespaceAndName(namespace, name string) string { + return "ingress/" + namespace + "/" + name +} + +const ( + ingressNameLabel = metaLabelPrefix + "ingress_name" + ingressLabelPrefix = metaLabelPrefix + "ingress_label_" + ingressAnnotationPrefix = metaLabelPrefix + "ingress_annotation_" + ingressSchemeLabel = metaLabelPrefix + "ingress_scheme" + ingressHostLabel = metaLabelPrefix + "ingress_host" + ingressPathLabel = metaLabelPrefix + "ingress_path" +) + +func ingressLabels(ingress *v1beta1.Ingress) model.LabelSet { + ls := make(model.LabelSet, len(ingress.Labels)+len(ingress.Annotations)+2) + ls[ingressNameLabel] = lv(ingress.Name) + ls[namespaceLabel] = lv(ingress.Namespace) + + for k, v := range ingress.Labels { + ln := strutil.SanitizeLabelName(ingressLabelPrefix + k) + ls[model.LabelName(ln)] = lv(v) + } + + for k, v := range 
ingress.Annotations { + ln := strutil.SanitizeLabelName(ingressAnnotationPrefix + k) + ls[model.LabelName(ln)] = lv(v) + } + return ls +} + +func pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string { + if rv.HTTP == nil { + return []string{"/"} + } + paths := make([]string, len(rv.HTTP.Paths)) + for n, p := range rv.HTTP.Paths { + path := p.Path + if path == "" { + path = "/" + } + paths[n] = path + } + return paths +} + +func (s *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group { + tg := &targetgroup.Group{ + Source: ingressSource(ingress), + } + tg.Labels = ingressLabels(ingress) + + schema := "http" + if ingress.Spec.TLS != nil { + schema = "https" + } + for _, rule := range ingress.Spec.Rules { + paths := pathsFromIngressRule(&rule.IngressRuleValue) + + for _, path := range paths { + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: lv(rule.Host), + ingressSchemeLabel: lv(schema), + ingressHostLabel: lv(rule.Host), + ingressPathLabel: lv(path), + }) + } + } + + return tg +} diff --git a/src/prometheus/discovery/kubernetes/ingress_test.go b/src/prometheus/discovery/kubernetes/ingress_test.go new file mode 100644 index 0000000..b3832eb --- /dev/null +++ b/src/prometheus/discovery/kubernetes/ingress_test.go @@ -0,0 +1,157 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "fmt" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +func makeIngress(tls []v1beta1.IngressTLS) *v1beta1.Ingress { + return &v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testingress", + Namespace: "default", + Labels: map[string]string{"testlabel": "testvalue"}, + Annotations: map[string]string{"testannotation": "testannotationvalue"}, + }, + Spec: v1beta1.IngressSpec{ + TLS: tls, + Rules: []v1beta1.IngressRule{ + { + Host: "example.com", + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + {Path: "/"}, + {Path: "/foo"}, + }, + }, + }, + }, + { + // No backend config, ignored + Host: "nobackend.example.com", + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{}, + }, + }, + { + Host: "test.example.com", + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{{}}, + }, + }, + }, + }, + }, + } +} + +func expectedTargetGroups(ns string, tls bool) map[string]*targetgroup.Group { + scheme := "http" + if tls { + scheme = "https" + } + key := fmt.Sprintf("ingress/%s/testingress", ns) + return map[string]*targetgroup.Group{ + key: { + Targets: []model.LabelSet{ + { + "__meta_kubernetes_ingress_scheme": lv(scheme), + "__meta_kubernetes_ingress_host": "example.com", + "__meta_kubernetes_ingress_path": "/", + "__address__": "example.com", + }, + { + "__meta_kubernetes_ingress_scheme": lv(scheme), + "__meta_kubernetes_ingress_host": "example.com", + "__meta_kubernetes_ingress_path": "/foo", + "__address__": "example.com", + }, + { + "__meta_kubernetes_ingress_scheme": lv(scheme), + "__meta_kubernetes_ingress_host": "test.example.com", + "__address__": "test.example.com", + 
"__meta_kubernetes_ingress_path": "/", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_ingress_name": "testingress", + "__meta_kubernetes_namespace": lv(ns), + "__meta_kubernetes_ingress_label_testlabel": "testvalue", + "__meta_kubernetes_ingress_annotation_testannotation": "testannotationvalue", + }, + Source: key, + }, + } +} + +func TestIngressDiscoveryAdd(t *testing.T) { + n, c, w := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := makeIngress(nil) + c.ExtensionsV1beta1().Ingresses("default").Create(obj) + w.Ingresses().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: expectedTargetGroups("default", false), + }.Run(t) +} + +func TestIngressDiscoveryAddTLS(t *testing.T) { + n, c, w := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := makeIngress([]v1beta1.IngressTLS{{}}) + c.ExtensionsV1beta1().Ingresses("default").Create(obj) + w.Ingresses().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: expectedTargetGroups("default", true), + }.Run(t) +} + +func TestIngressDiscoveryNamespaces(t *testing.T) { + n, c, w := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) + + expected := expectedTargetGroups("ns1", false) + for k, v := range expectedTargetGroups("ns2", false) { + expected[k] = v + } + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + for _, ns := range []string{"ns1", "ns2"} { + obj := makeIngress(nil) + obj.Namespace = ns + c.ExtensionsV1beta1().Ingresses(obj.Namespace).Create(obj) + w.Ingresses().Add(obj) + } + }, + expectedMaxItems: 2, + expectedRes: expected, + }.Run(t) +} diff --git a/src/prometheus/discovery/kubernetes/kubernetes.go b/src/prometheus/discovery/kubernetes/kubernetes.go new file mode 100644 index 0000000..637a4eb --- /dev/null +++ b/src/prometheus/discovery/kubernetes/kubernetes.go @@ -0,0 +1,390 @@ 
+// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "io/ioutil" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/pkg/api" + apiv1 "k8s.io/client-go/pkg/api/v1" + extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const ( + // kubernetesMetaLabelPrefix is the meta prefix used for all meta labels. + // in this discovery. + metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_" + namespaceLabel = metaLabelPrefix + "namespace" +) + +var ( + eventCount = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_kubernetes_events_total", + Help: "The number of Kubernetes events handled.", + }, + []string{"role", "event"}, + ) + // DefaultSDConfig is the default Kubernetes SD configuration + DefaultSDConfig = SDConfig{} +) + +// Role is role of the service in Kubernetes. +type Role string + +// The valid options for Role. 
+const ( + RoleNode Role = "node" + RolePod Role = "pod" + RoleService Role = "service" + RoleEndpoint Role = "endpoints" + RoleIngress Role = "ingress" +) + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { + if err := unmarshal((*string)(c)); err != nil { + return err + } + switch *c { + case RoleNode, RolePod, RoleService, RoleEndpoint, RoleIngress: + return nil + default: + return fmt.Errorf("Unknown Kubernetes SD role %q", *c) + } +} + +// SDConfig is the configuration for Kubernetes service discovery. +type SDConfig struct { + APIServer config_util.URL `yaml:"api_server"` + Role Role `yaml:"role"` + BasicAuth *config_util.BasicAuth `yaml:"basic_auth,omitempty"` + BearerToken config_util.Secret `yaml:"bearer_token,omitempty"` + BearerTokenFile string `yaml:"bearer_token_file,omitempty"` + TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"` + NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = SDConfig{} + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if c.Role == "" { + return fmt.Errorf("role missing (one of: pod, service, endpoints, node)") + } + if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { + return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured") + } + if c.BasicAuth != nil && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { + return fmt.Errorf("at most one of basic_auth, bearer_token & bearer_token_file must be configured") + } + if c.APIServer.URL == nil && + (c.BasicAuth != nil || c.BearerToken != "" || c.BearerTokenFile != "" || + c.TLSConfig.CAFile != "" || c.TLSConfig.CertFile != "" || c.TLSConfig.KeyFile != "") { + return fmt.Errorf("to use custom authentication please provide the 'api_server' URL explicitly") + } + return nil +} + +// NamespaceDiscovery is the configuration for discovering +// Kubernetes namespaces. +type NamespaceDiscovery struct { + Names []string `yaml:"names"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = NamespaceDiscovery{} + type plain NamespaceDiscovery + return unmarshal((*plain)(c)) +} + +func init() { + prometheus.MustRegister(eventCount) + + // Initialize metric vectors. + for _, role := range []string{"endpoints", "node", "pod", "service"} { + for _, evt := range []string{"add", "delete", "update"} { + eventCount.WithLabelValues(role, evt) + } + } +} + +// This is only for internal use. +type discoverer interface { + Run(ctx context.Context, up chan<- []*targetgroup.Group) +} + +// Discovery implements the discoverer interface for discovering +// targets from Kubernetes. 
+type Discovery struct { + sync.RWMutex + client kubernetes.Interface + role Role + logger log.Logger + namespaceDiscovery *NamespaceDiscovery + discoverers []discoverer +} + +func (d *Discovery) getNamespaces() []string { + namespaces := d.namespaceDiscovery.Names + if len(namespaces) == 0 { + namespaces = []string{api.NamespaceAll} + } + return namespaces +} + +// New creates a new Kubernetes discovery for the given role. +func New(l log.Logger, conf *SDConfig) (*Discovery, error) { + if l == nil { + l = log.NewNopLogger() + } + var ( + kcfg *rest.Config + err error + ) + if conf.APIServer.URL == nil { + // Use the Kubernetes provided pod service account + // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ + kcfg, err = rest.InClusterConfig() + if err != nil { + return nil, err + } + // Because the handling of configuration parameters changes + // we should inform the user when their currently configured values + // will be ignored due to precedence of InClusterConfig + level.Info(l).Log("msg", "Using pod service account via in-cluster config") + + if conf.TLSConfig.CAFile != "" { + level.Warn(l).Log("msg", "Configured TLS CA file is ignored when using pod service account") + } + if conf.TLSConfig.CertFile != "" || conf.TLSConfig.KeyFile != "" { + level.Warn(l).Log("msg", "Configured TLS client certificate is ignored when using pod service account") + } + if conf.BearerToken != "" { + level.Warn(l).Log("msg", "Configured auth token is ignored when using pod service account") + } + if conf.BasicAuth != nil { + level.Warn(l).Log("msg", "Configured basic authentication credentials are ignored when using pod service account") + } + } else { + kcfg = &rest.Config{ + Host: conf.APIServer.String(), + TLSClientConfig: rest.TLSClientConfig{ + CAFile: conf.TLSConfig.CAFile, + CertFile: conf.TLSConfig.CertFile, + KeyFile: conf.TLSConfig.KeyFile, + Insecure: conf.TLSConfig.InsecureSkipVerify, + }, + } + token := string(conf.BearerToken) + if 
conf.BearerTokenFile != "" { + bf, err := ioutil.ReadFile(conf.BearerTokenFile) + if err != nil { + return nil, err + } + token = string(bf) + } + kcfg.BearerToken = token + + if conf.BasicAuth != nil { + kcfg.Username = conf.BasicAuth.Username + kcfg.Password = string(conf.BasicAuth.Password) + } + } + + kcfg.UserAgent = "prometheus/discovery" + + c, err := kubernetes.NewForConfig(kcfg) + if err != nil { + return nil, err + } + return &Discovery{ + client: c, + logger: l, + role: conf.Role, + namespaceDiscovery: &conf.NamespaceDiscovery, + discoverers: make([]discoverer, 0), + }, nil +} + +const resyncPeriod = 10 * time.Minute + +// Run implements the discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + d.Lock() + namespaces := d.getNamespaces() + + switch d.role { + case RoleEndpoint: + for _, namespace := range namespaces { + e := d.client.CoreV1().Endpoints(namespace) + elw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return e.List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return e.Watch(options) + }, + } + s := d.client.CoreV1().Services(namespace) + slw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return s.List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return s.Watch(options) + }, + } + p := d.client.CoreV1().Pods(namespace) + plw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return p.List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return p.Watch(options) + }, + } + eps := NewEndpoints( + log.With(d.logger, "role", "endpoint"), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), + cache.NewSharedInformer(elw, &apiv1.Endpoints{}, resyncPeriod), + cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod), + ) + 
d.discoverers = append(d.discoverers, eps) + go eps.endpointsInf.Run(ctx.Done()) + go eps.serviceInf.Run(ctx.Done()) + go eps.podInf.Run(ctx.Done()) + } + case RolePod: + for _, namespace := range namespaces { + p := d.client.CoreV1().Pods(namespace) + plw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return p.List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return p.Watch(options) + }, + } + pod := NewPod( + log.With(d.logger, "role", "pod"), + cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod), + ) + d.discoverers = append(d.discoverers, pod) + go pod.informer.Run(ctx.Done()) + } + case RoleService: + for _, namespace := range namespaces { + s := d.client.CoreV1().Services(namespace) + slw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return s.List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return s.Watch(options) + }, + } + svc := NewService( + log.With(d.logger, "role", "service"), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), + ) + d.discoverers = append(d.discoverers, svc) + go svc.informer.Run(ctx.Done()) + } + case RoleIngress: + for _, namespace := range namespaces { + i := d.client.ExtensionsV1beta1().Ingresses(namespace) + ilw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return i.List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return i.Watch(options) + }, + } + ingress := NewIngress( + log.With(d.logger, "role", "ingress"), + cache.NewSharedInformer(ilw, &extensionsv1beta1.Ingress{}, resyncPeriod), + ) + d.discoverers = append(d.discoverers, ingress) + go ingress.informer.Run(ctx.Done()) + } + case RoleNode: + nlw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + return d.client.CoreV1().Nodes().List(options) + 
}, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return d.client.CoreV1().Nodes().Watch(options) + }, + } + node := NewNode( + log.With(d.logger, "role", "node"), + cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncPeriod), + ) + d.discoverers = append(d.discoverers, node) + go node.informer.Run(ctx.Done()) + default: + level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role) + } + + var wg sync.WaitGroup + for _, dd := range d.discoverers { + wg.Add(1) + go func(d discoverer) { + defer wg.Done() + d.Run(ctx, ch) + }(dd) + } + + d.Unlock() + <-ctx.Done() +} + +func lv(s string) model.LabelValue { + return model.LabelValue(s) +} + +func send(ctx context.Context, l log.Logger, role Role, ch chan<- []*targetgroup.Group, tg *targetgroup.Group) { + if tg == nil { + return + } + level.Debug(l).Log("msg", "kubernetes discovery update", "role", string(role), "tg", fmt.Sprintf("%#v", tg)) + select { + case <-ctx.Done(): + case ch <- []*targetgroup.Group{tg}: + } +} diff --git a/src/prometheus/discovery/kubernetes/kubernetes_test.go b/src/prometheus/discovery/kubernetes/kubernetes_test.go new file mode 100644 index 0000000..bcc2fb9 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/kubernetes_test.go @@ -0,0 +1,236 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "context" + "encoding/json" + "sync" + "testing" + "time" + + "github.com/go-kit/kit/log" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" +) + +type watcherFactory struct { + sync.RWMutex + watchers map[schema.GroupVersionResource]*watch.FakeWatcher +} + +func (wf *watcherFactory) watchFor(gvr schema.GroupVersionResource) *watch.FakeWatcher { + wf.Lock() + defer wf.Unlock() + + var fakewatch *watch.FakeWatcher + fakewatch, ok := wf.watchers[gvr] + if !ok { + fakewatch = watch.NewFakeWithChanSize(128, true) + wf.watchers[gvr] = fakewatch + } + return fakewatch +} + +func (wf *watcherFactory) Nodes() *watch.FakeWatcher { + return wf.watchFor(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}) +} + +func (wf *watcherFactory) Ingresses() *watch.FakeWatcher { + return wf.watchFor(schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}) +} + +func (wf *watcherFactory) Endpoints() *watch.FakeWatcher { + return wf.watchFor(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"}) +} + +func (wf *watcherFactory) Services() *watch.FakeWatcher { + return wf.watchFor(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}) +} + +func (wf *watcherFactory) Pods() *watch.FakeWatcher { + return wf.watchFor(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}) +} + +// makeDiscovery creates a kubernetes.Discovery instance for testing. +func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface, *watcherFactory) { + clientset := fake.NewSimpleClientset(objects...) 
+ // Current client-go we are using does not support push event on + // Add/Update/Create, so we need to emit event manually. + // See https://github.com/kubernetes/kubernetes/issues/54075. + // TODO update client-go thChanSizeand related packages to kubernetes-1.10.0+ + wf := &watcherFactory{ + watchers: make(map[schema.GroupVersionResource]*watch.FakeWatcher), + } + clientset.PrependWatchReactor("*", func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + return true, wf.watchFor(gvr), nil + }) + return &Discovery{ + client: clientset, + logger: log.NewNopLogger(), + role: role, + namespaceDiscovery: &nsDiscovery, + }, clientset, wf +} + +type k8sDiscoveryTest struct { + // discovery is instance of discovery.Discoverer + discovery discoverer + // beforeRun runs before discoverer run + beforeRun func() + // afterStart runs after discoverer has synced + afterStart func() + // expectedMaxItems is expected max items we may get from channel + expectedMaxItems int + // expectedRes is expected final result + expectedRes map[string]*targetgroup.Group +} + +func (d k8sDiscoveryTest) Run(t *testing.T) { + ch := make(chan []*targetgroup.Group) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + if d.beforeRun != nil { + d.beforeRun() + } + + // Run discoverer and start a goroutine to read results. 
+ go d.discovery.Run(ctx, ch) + resChan := make(chan map[string]*targetgroup.Group) + go readResultWithTimeout(t, ch, d.expectedMaxItems, time.Second, resChan) + + dd, ok := d.discovery.(hasSynced) + if !ok { + t.Errorf("discoverer does not implement hasSynced interface") + return + } + if !cache.WaitForCacheSync(ctx.Done(), dd.hasSynced) { + t.Errorf("discoverer failed to sync: %v", dd) + return + } + + if d.afterStart != nil { + d.afterStart() + } + + if d.expectedRes != nil { + res := <-resChan + requireTargetGroups(t, d.expectedRes, res) + } +} + +// readResultWithTimeout reads all targegroups from channel with timeout. +// It merges targegroups by source and sends the result to result channel. +func readResultWithTimeout(t *testing.T, ch <-chan []*targetgroup.Group, max int, timeout time.Duration, resChan chan<- map[string]*targetgroup.Group) { + allTgs := make([][]*targetgroup.Group, 0) + +Loop: + for { + select { + case tgs := <-ch: + allTgs = append(allTgs, tgs) + if len(allTgs) == max { + // Reached max target groups we may get, break fast. + break Loop + } + case <-time.After(timeout): + // Because we use queue, an object that is created then + // deleted or updated may be processed only once. + // So possibliy we may skip events, timed out here. + t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(allTgs), max) + break Loop + } + } + + // Merge by source and sent it to channel. + res := make(map[string]*targetgroup.Group) + for _, tgs := range allTgs { + for _, tg := range tgs { + if tg == nil { + continue + } + res[tg.Source] = tg + } + } + resChan <- res +} + +func requireTargetGroups(t *testing.T, expected, res map[string]*targetgroup.Group) { + b1, err := json.Marshal(expected) + if err != nil { + panic(err) + } + b2, err := json.Marshal(res) + if err != nil { + panic(err) + } + + require.JSONEq(t, string(b1), string(b2)) +} + +type hasSynced interface { + // hasSynced returns true if all informers synced. 
+ // This is only used in testing to determine when discoverer synced to + // kubernetes apiserver. + hasSynced() bool +} + +var _ hasSynced = &Discovery{} +var _ hasSynced = &Node{} +var _ hasSynced = &Endpoints{} +var _ hasSynced = &Ingress{} +var _ hasSynced = &Pod{} +var _ hasSynced = &Service{} + +func (d *Discovery) hasSynced() bool { + d.RLock() + defer d.RUnlock() + for _, discoverer := range d.discoverers { + if hasSynceddiscoverer, ok := discoverer.(hasSynced); ok { + if !hasSynceddiscoverer.hasSynced() { + return false + } + } + } + return true +} + +func (n *Node) hasSynced() bool { + return n.informer.HasSynced() +} + +func (e *Endpoints) hasSynced() bool { + return e.endpointsInf.HasSynced() && e.serviceInf.HasSynced() && e.podInf.HasSynced() +} + +func (i *Ingress) hasSynced() bool { + return i.informer.HasSynced() +} + +func (p *Pod) hasSynced() bool { + return p.informer.HasSynced() +} + +func (s *Service) hasSynced() bool { + return s.informer.HasSynced() +} diff --git a/src/prometheus/discovery/kubernetes/node.go b/src/prometheus/discovery/kubernetes/node.go new file mode 100644 index 0000000..2e1e0aa --- /dev/null +++ b/src/prometheus/discovery/kubernetes/node.go @@ -0,0 +1,215 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" + "k8s.io/client-go/pkg/api" + apiv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +// Node discovers Kubernetes nodes. +type Node struct { + logger log.Logger + informer cache.SharedInformer + store cache.Store + queue *workqueue.Type +} + +// NewNode returns a new node discovery. +func NewNode(l log.Logger, inf cache.SharedInformer) *Node { + if l == nil { + l = log.NewNopLogger() + } + n := &Node{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("node")} + n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(o interface{}) { + eventCount.WithLabelValues("node", "add").Inc() + n.enqueue(o) + }, + DeleteFunc: func(o interface{}) { + eventCount.WithLabelValues("node", "delete").Inc() + n.enqueue(o) + }, + UpdateFunc: func(_, o interface{}) { + eventCount.WithLabelValues("node", "update").Inc() + n.enqueue(o) + }, + }) + return n +} + +func (e *Node) enqueue(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return + } + + e.queue.Add(key) +} + +// Run implements the Discoverer interface. +func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + defer n.queue.ShutDown() + + if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) { + level.Error(n.logger).Log("msg", "node informer unable to sync cache") + return + } + + go func() { + for n.process(ctx, ch) { + } + }() + + // Block until the target provider is explicitly canceled. 
+ <-ctx.Done() +} + +func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { + keyObj, quit := n.queue.Get() + if quit { + return false + } + defer n.queue.Done(keyObj) + key := keyObj.(string) + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return true + } + + o, exists, err := n.store.GetByKey(key) + if err != nil { + return true + } + if !exists { + send(ctx, n.logger, RoleNode, ch, &targetgroup.Group{Source: nodeSourceFromName(name)}) + return true + } + node, err := convertToNode(o) + if err != nil { + level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err) + return true + } + send(ctx, n.logger, RoleNode, ch, n.buildNode(node)) + return true +} + +func convertToNode(o interface{}) (*apiv1.Node, error) { + node, ok := o.(*apiv1.Node) + if ok { + return node, nil + } + + return nil, fmt.Errorf("Received unexpected object: %v", o) +} + +func nodeSource(n *apiv1.Node) string { + return nodeSourceFromName(n.Name) +} + +func nodeSourceFromName(name string) string { + return "node/" + name +} + +const ( + nodeNameLabel = metaLabelPrefix + "node_name" + nodeLabelPrefix = metaLabelPrefix + "node_label_" + nodeAnnotationPrefix = metaLabelPrefix + "node_annotation_" + nodeAddressPrefix = metaLabelPrefix + "node_address_" +) + +func nodeLabels(n *apiv1.Node) model.LabelSet { + ls := make(model.LabelSet, len(n.Labels)+len(n.Annotations)+1) + + ls[nodeNameLabel] = lv(n.Name) + + for k, v := range n.Labels { + ln := strutil.SanitizeLabelName(nodeLabelPrefix + k) + ls[model.LabelName(ln)] = lv(v) + } + + for k, v := range n.Annotations { + ln := strutil.SanitizeLabelName(nodeAnnotationPrefix + k) + ls[model.LabelName(ln)] = lv(v) + } + return ls +} + +func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { + tg := &targetgroup.Group{ + Source: nodeSource(node), + } + tg.Labels = nodeLabels(node) + + addr, addrMap, err := nodeAddress(node) + if err != nil { + level.Warn(n.logger).Log("msg", 
"No node address found", "err", err) + return nil + } + addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) + + t := model.LabelSet{ + model.AddressLabel: lv(addr), + model.InstanceLabel: lv(node.Name), + } + + for ty, a := range addrMap { + ln := strutil.SanitizeLabelName(nodeAddressPrefix + string(ty)) + t[model.LabelName(ln)] = lv(a[0]) + } + tg.Targets = append(tg.Targets, t) + + return tg +} + +// nodeAddresses returns the provided node's address, based on the priority: +// 1. NodeInternalIP +// 2. NodeExternalIP +// 3. NodeLegacyHostIP +// 3. NodeHostName +// +// Derived from k8s.io/kubernetes/pkg/util/node/node.go +func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) { + m := map[apiv1.NodeAddressType][]string{} + for _, a := range node.Status.Addresses { + m[a.Type] = append(m[a.Type], a.Address) + } + + if addresses, ok := m[apiv1.NodeInternalIP]; ok { + return addresses[0], m, nil + } + if addresses, ok := m[apiv1.NodeExternalIP]; ok { + return addresses[0], m, nil + } + if addresses, ok := m[apiv1.NodeAddressType(api.NodeLegacyHostIP)]; ok { + return addresses[0], m, nil + } + if addresses, ok := m[apiv1.NodeHostName]; ok { + return addresses[0], m, nil + } + return "", m, fmt.Errorf("host address unknown") +} diff --git a/src/prometheus/discovery/kubernetes/node_test.go b/src/prometheus/discovery/kubernetes/node_test.go new file mode 100644 index 0000000..8a02175 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/node_test.go @@ -0,0 +1,173 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "fmt" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node { + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + Annotations: annotations, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: address, + }, + }, + DaemonEndpoints: v1.NodeDaemonEndpoints{ + KubeletEndpoint: v1.DaemonEndpoint{ + Port: 10250, + }, + }, + }, + } +} + +func makeEnumeratedNode(i int) *v1.Node { + return makeNode(fmt.Sprintf("test%d", i), "1.2.3.4", map[string]string{}, map[string]string{}) +} + +func TestNodeDiscoveryBeforeStart(t *testing.T) { + n, c, w := makeDiscovery(RoleNode, NamespaceDiscovery{}) + + k8sDiscoveryTest{ + discovery: n, + beforeRun: func() { + obj := makeNode( + "test", + "1.2.3.4", + map[string]string{"testlabel": "testvalue"}, + map[string]string{"testannotation": "testannotationvalue"}, + ) + c.CoreV1().Nodes().Create(obj) + w.Nodes().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "node/test": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:10250", + "instance": "test", + "__meta_kubernetes_node_address_InternalIP": "1.2.3.4", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_node_name": "test", + 
"__meta_kubernetes_node_label_testlabel": "testvalue", + "__meta_kubernetes_node_annotation_testannotation": "testannotationvalue", + }, + Source: "node/test", + }, + }, + }.Run(t) +} + +func TestNodeDiscoveryAdd(t *testing.T) { + n, c, w := makeDiscovery(RoleNode, NamespaceDiscovery{}) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := makeEnumeratedNode(1) + c.CoreV1().Nodes().Create(obj) + w.Nodes().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "node/test1": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:10250", + "instance": "test1", + "__meta_kubernetes_node_address_InternalIP": "1.2.3.4", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_node_name": "test1", + }, + Source: "node/test1", + }, + }, + }.Run(t) +} + +func TestNodeDiscoveryDelete(t *testing.T) { + obj := makeEnumeratedNode(0) + n, c, w := makeDiscovery(RoleNode, NamespaceDiscovery{}, obj) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + c.CoreV1().Nodes().Delete(obj.Name, &metav1.DeleteOptions{}) + w.Nodes().Delete(obj) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "node/test0": { + Source: "node/test0", + }, + }, + }.Run(t) +} + +func TestNodeDiscoveryUpdate(t *testing.T) { + n, c, w := makeDiscovery(RoleNode, NamespaceDiscovery{}) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj1 := makeEnumeratedNode(0) + c.CoreV1().Nodes().Create(obj1) + w.Nodes().Add(obj1) + obj2 := makeNode( + "test0", + "1.2.3.4", + map[string]string{"Unschedulable": "true"}, + map[string]string{}, + ) + c.CoreV1().Nodes().Update(obj2) + w.Nodes().Modify(obj2) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "node/test0": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:10250", + "instance": "test0", + "__meta_kubernetes_node_address_InternalIP": "1.2.3.4", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_node_label_Unschedulable": "true", 
+ "__meta_kubernetes_node_name": "test0", + }, + Source: "node/test0", + }, + }, + }.Run(t) +} diff --git a/src/prometheus/discovery/kubernetes/pod.go b/src/prometheus/discovery/kubernetes/pod.go new file mode 100644 index 0000000..986fcb8 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/pod.go @@ -0,0 +1,256 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/model" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" + apiv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +// Pod discovers new pod targets. +type Pod struct { + informer cache.SharedInformer + store cache.Store + logger log.Logger + queue *workqueue.Type +} + +// NewPod creates a new pod discovery. 
+func NewPod(l log.Logger, pods cache.SharedInformer) *Pod { + if l == nil { + l = log.NewNopLogger() + } + p := &Pod{ + informer: pods, + store: pods.GetStore(), + logger: l, + queue: workqueue.NewNamed("pod"), + } + p.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(o interface{}) { + eventCount.WithLabelValues("pod", "add").Inc() + p.enqueue(o) + }, + DeleteFunc: func(o interface{}) { + eventCount.WithLabelValues("pod", "delete").Inc() + p.enqueue(o) + }, + UpdateFunc: func(_, o interface{}) { + eventCount.WithLabelValues("pod", "update").Inc() + p.enqueue(o) + }, + }) + return p +} + +func (e *Pod) enqueue(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return + } + + e.queue.Add(key) +} + +// Run implements the Discoverer interface. +func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + defer p.queue.ShutDown() + + if !cache.WaitForCacheSync(ctx.Done(), p.informer.HasSynced) { + level.Error(p.logger).Log("msg", "pod informer unable to sync cache") + return + } + + go func() { + for p.process(ctx, ch) { + } + }() + + // Block until the target provider is explicitly canceled. 
+ <-ctx.Done() +} + +func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { + keyObj, quit := p.queue.Get() + if quit { + return false + } + defer p.queue.Done(keyObj) + key := keyObj.(string) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return true + } + + o, exists, err := p.store.GetByKey(key) + if err != nil { + return true + } + if !exists { + send(ctx, p.logger, RolePod, ch, &targetgroup.Group{Source: podSourceFromNamespaceAndName(namespace, name)}) + return true + } + eps, err := convertToPod(o) + if err != nil { + level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err) + return true + } + send(ctx, p.logger, RolePod, ch, p.buildPod(eps)) + return true +} + +func convertToPod(o interface{}) (*apiv1.Pod, error) { + pod, ok := o.(*apiv1.Pod) + if ok { + return pod, nil + } + + return nil, fmt.Errorf("Received unexpected object: %v", o) +} + +const ( + podNameLabel = metaLabelPrefix + "pod_name" + podIPLabel = metaLabelPrefix + "pod_ip" + podContainerNameLabel = metaLabelPrefix + "pod_container_name" + podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name" + podContainerPortNumberLabel = metaLabelPrefix + "pod_container_port_number" + podContainerPortProtocolLabel = metaLabelPrefix + "pod_container_port_protocol" + podReadyLabel = metaLabelPrefix + "pod_ready" + podLabelPrefix = metaLabelPrefix + "pod_label_" + podAnnotationPrefix = metaLabelPrefix + "pod_annotation_" + podNodeNameLabel = metaLabelPrefix + "pod_node_name" + podHostIPLabel = metaLabelPrefix + "pod_host_ip" + podUID = metaLabelPrefix + "pod_uid" + podControllerKind = metaLabelPrefix + "pod_controller_kind" + podControllerName = metaLabelPrefix + "pod_controller_name" +) + +// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller +// 
https://github.com/kubernetes/apimachinery/blob/cd2cae2b39fa57e8063fa1f5f13cfe9862db3d41/pkg/apis/meta/v1/controller_ref.go +func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference { + for _, ref := range controllee.GetOwnerReferences() { + if ref.Controller != nil && *ref.Controller { + return &ref + } + } + return nil +} + +func podLabels(pod *apiv1.Pod) model.LabelSet { + ls := model.LabelSet{ + podNameLabel: lv(pod.ObjectMeta.Name), + podIPLabel: lv(pod.Status.PodIP), + podReadyLabel: podReady(pod), + podNodeNameLabel: lv(pod.Spec.NodeName), + podHostIPLabel: lv(pod.Status.HostIP), + podUID: lv(string(pod.ObjectMeta.UID)), + } + + createdBy := GetControllerOf(pod) + if createdBy != nil { + if createdBy.Kind != "" { + ls[podControllerKind] = lv(createdBy.Kind) + } + if createdBy.Name != "" { + ls[podControllerName] = lv(createdBy.Name) + } + } + + for k, v := range pod.Labels { + ln := strutil.SanitizeLabelName(podLabelPrefix + k) + ls[model.LabelName(ln)] = lv(v) + } + + for k, v := range pod.Annotations { + ln := strutil.SanitizeLabelName(podAnnotationPrefix + k) + ls[model.LabelName(ln)] = lv(v) + } + + return ls +} + +func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { + tg := &targetgroup.Group{ + Source: podSource(pod), + } + // PodIP can be empty when a pod is starting or has been evicted. + if len(pod.Status.PodIP) == 0 { + return tg + } + + tg.Labels = podLabels(pod) + tg.Labels[namespaceLabel] = lv(pod.Namespace) + + for _, c := range pod.Spec.Containers { + // If no ports are defined for the container, create an anonymous + // target per container. + if len(c.Ports) == 0 { + // We don't have a port so we just set the address label to the pod IP. + // The user has to add a port manually. + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: lv(pod.Status.PodIP), + podContainerNameLabel: lv(c.Name), + }) + continue + } + // Otherwise create one target for each container/port combination. 
+ for _, port := range c.Ports { + ports := strconv.FormatUint(uint64(port.ContainerPort), 10) + addr := net.JoinHostPort(pod.Status.PodIP, ports) + + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: lv(addr), + podContainerNameLabel: lv(c.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortNameLabel: lv(port.Name), + podContainerPortProtocolLabel: lv(string(port.Protocol)), + }) + } + } + + return tg +} + +func podSource(pod *apiv1.Pod) string { + return podSourceFromNamespaceAndName(pod.Namespace, pod.Name) +} + +func podSourceFromNamespaceAndName(namespace, name string) string { + return "pod/" + namespace + "/" + name +} + +func podReady(pod *apiv1.Pod) model.LabelValue { + for _, cond := range pod.Status.Conditions { + if cond.Type == apiv1.PodReady { + return lv(strings.ToLower(string(cond.Status))) + } + } + return lv(strings.ToLower(string(api.ConditionUnknown))) +} diff --git a/src/prometheus/discovery/kubernetes/pod_test.go b/src/prometheus/discovery/kubernetes/pod_test.go new file mode 100644 index 0000000..b921bf9 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/pod_test.go @@ -0,0 +1,319 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kubernetes + +import ( + "fmt" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/pkg/api/v1" +) + +func makeOptionalBool(v bool) *bool { + return &v +} + +func makeMultiPortPods() *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + Labels: map[string]string{"testlabel": "testvalue"}, + Annotations: map[string]string{"testannotation": "testannotationvalue"}, + UID: types.UID("abc123"), + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "testcontrollerkind", + Name: "testcontrollername", + Controller: makeOptionalBool(true), + }, + }, + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "testcontainer0", + Ports: []v1.ContainerPort{ + { + Name: "testport0", + Protocol: v1.ProtocolTCP, + ContainerPort: int32(9000), + }, + { + Name: "testport1", + Protocol: v1.ProtocolUDP, + ContainerPort: int32(9001), + }, + }, + }, + { + Name: "testcontainer1", + }, + }, + }, + Status: v1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "2.3.4.5", + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + }, + }, + } +} + +func makePods() *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: types.UID("abc123"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "testcontainer", + Ports: []v1.ContainerPort{ + { + Name: "testport", + Protocol: v1.ProtocolTCP, + ContainerPort: int32(9000), + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "2.3.4.5", + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + }, + }, + } +} + +func expectedPodTargetGroups(ns string) map[string]*targetgroup.Group { + key := fmt.Sprintf("pod/%s/testpod", 
ns) + return map[string]*targetgroup.Group{ + key: { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:9000", + "__meta_kubernetes_pod_container_name": "testcontainer", + "__meta_kubernetes_pod_container_port_name": "testport", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_namespace": lv(ns), + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_ip": "1.2.3.4", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ready": "true", + "__meta_kubernetes_pod_uid": "abc123", + }, + Source: key, + }, + } +} + +func TestPodDiscoveryBeforeRun(t *testing.T) { + n, c, w := makeDiscovery(RolePod, NamespaceDiscovery{}) + + k8sDiscoveryTest{ + discovery: n, + beforeRun: func() { + obj := makeMultiPortPods() + c.CoreV1().Pods(obj.Namespace).Create(obj) + w.Pods().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: map[string]*targetgroup.Group{ + "pod/default/testpod": { + Targets: []model.LabelSet{ + { + "__address__": "1.2.3.4:9000", + "__meta_kubernetes_pod_container_name": "testcontainer0", + "__meta_kubernetes_pod_container_port_name": "testport0", + "__meta_kubernetes_pod_container_port_number": "9000", + "__meta_kubernetes_pod_container_port_protocol": "TCP", + }, + { + "__address__": "1.2.3.4:9001", + "__meta_kubernetes_pod_container_name": "testcontainer0", + "__meta_kubernetes_pod_container_port_name": "testport1", + "__meta_kubernetes_pod_container_port_number": "9001", + "__meta_kubernetes_pod_container_port_protocol": "UDP", + }, + { + "__address__": "1.2.3.4", + "__meta_kubernetes_pod_container_name": "testcontainer1", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_pod_name": "testpod", + "__meta_kubernetes_namespace": "default", + "__meta_kubernetes_pod_label_testlabel": "testvalue", + "__meta_kubernetes_pod_annotation_testannotation": 
"testannotationvalue", + "__meta_kubernetes_pod_node_name": "testnode", + "__meta_kubernetes_pod_ip": "1.2.3.4", + "__meta_kubernetes_pod_host_ip": "2.3.4.5", + "__meta_kubernetes_pod_ready": "true", + "__meta_kubernetes_pod_uid": "abc123", + "__meta_kubernetes_pod_controller_kind": "testcontrollerkind", + "__meta_kubernetes_pod_controller_name": "testcontrollername", + }, + Source: "pod/default/testpod", + }, + }, + }.Run(t) +} + +func TestPodDiscoveryAdd(t *testing.T) { + n, c, w := makeDiscovery(RolePod, NamespaceDiscovery{}) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := makePods() + c.CoreV1().Pods(obj.Namespace).Create(obj) + w.Pods().Add(obj) + }, + expectedMaxItems: 1, + expectedRes: expectedPodTargetGroups("default"), + }.Run(t) +} + +func TestPodDiscoveryDelete(t *testing.T) { + obj := makePods() + n, c, w := makeDiscovery(RolePod, NamespaceDiscovery{}, obj) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := makePods() + c.CoreV1().Pods(obj.Namespace).Delete(obj.Name, &metav1.DeleteOptions{}) + w.Pods().Delete(obj) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "pod/default/testpod": { + Source: "pod/default/testpod", + }, + }, + }.Run(t) +} + +func TestPodDiscoveryUpdate(t *testing.T) { + obj := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "default", + UID: "xyz321", + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "testcontainer", + Ports: []v1.ContainerPort{ + { + Name: "testport", + Protocol: v1.ProtocolTCP, + ContainerPort: int32(9000), + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "2.3.4.5", + }, + } + n, c, w := makeDiscovery(RolePod, NamespaceDiscovery{}, obj) + + k8sDiscoveryTest{ + discovery: n, + afterStart: func() { + obj := makePods() + c.CoreV1().Pods(obj.Namespace).Create(obj) + w.Pods().Modify(obj) + }, + expectedMaxItems: 2, + expectedRes: 
expectedPodTargetGroups("default"), + }.Run(t) +} + +func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) { + n, c, w := makeDiscovery(RolePod, NamespaceDiscovery{}) + initialPod := makePods() + + updatedPod := makePods() + updatedPod.Status.PodIP = "" + + k8sDiscoveryTest{ + discovery: n, + beforeRun: func() { + c.CoreV1().Pods(initialPod.Namespace).Create(initialPod) + w.Pods().Add(initialPod) + }, + afterStart: func() { + c.CoreV1().Pods(updatedPod.Namespace).Create(updatedPod) + w.Pods().Modify(updatedPod) + }, + expectedMaxItems: 2, + expectedRes: map[string]*targetgroup.Group{ + "pod/default/testpod": { + Source: "pod/default/testpod", + }, + }, + }.Run(t) +} + +func TestPodDiscoveryNamespaces(t *testing.T) { + n, c, w := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) + + expected := expectedPodTargetGroups("ns1") + for k, v := range expectedPodTargetGroups("ns2") { + expected[k] = v + } + k8sDiscoveryTest{ + discovery: n, + beforeRun: func() { + for _, ns := range []string{"ns1", "ns2"} { + pod := makePods() + pod.Namespace = ns + c.CoreV1().Pods(pod.Namespace).Create(pod) + w.Pods().Add(pod) + } + }, + expectedMaxItems: 2, + expectedRes: expected, + }.Run(t) +} diff --git a/src/prometheus/discovery/kubernetes/service.go b/src/prometheus/discovery/kubernetes/service.go new file mode 100644 index 0000000..ce04a93 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/service.go @@ -0,0 +1,180 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/model" + apiv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +// Service implements discovery of Kubernetes services. +type Service struct { + logger log.Logger + informer cache.SharedInformer + store cache.Store + queue *workqueue.Type +} + +// NewService returns a new service discovery. +func NewService(l log.Logger, inf cache.SharedInformer) *Service { + if l == nil { + l = log.NewNopLogger() + } + s := &Service{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")} + s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(o interface{}) { + eventCount.WithLabelValues("service", "add").Inc() + s.enqueue(o) + }, + DeleteFunc: func(o interface{}) { + eventCount.WithLabelValues("service", "delete").Inc() + s.enqueue(o) + }, + UpdateFunc: func(_, o interface{}) { + eventCount.WithLabelValues("service", "update").Inc() + s.enqueue(o) + }, + }) + return s +} + +func (e *Service) enqueue(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return + } + + e.queue.Add(key) +} + +// Run implements the Discoverer interface. +func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + defer s.queue.ShutDown() + + if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { + level.Error(s.logger).Log("msg", "service informer unable to sync cache") + return + } + + go func() { + for s.process(ctx, ch) { + } + }() + + // Block until the target provider is explicitly canceled. 
+ <-ctx.Done() +} + +func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool { + keyObj, quit := s.queue.Get() + if quit { + return false + } + defer s.queue.Done(keyObj) + key := keyObj.(string) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return true + } + + o, exists, err := s.store.GetByKey(key) + if err != nil { + return true + } + if !exists { + send(ctx, s.logger, RoleService, ch, &targetgroup.Group{Source: serviceSourceFromNamespaceAndName(namespace, name)}) + return true + } + eps, err := convertToService(o) + if err != nil { + level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err) + return true + } + send(ctx, s.logger, RoleService, ch, s.buildService(eps)) + return true +} + +func convertToService(o interface{}) (*apiv1.Service, error) { + service, ok := o.(*apiv1.Service) + if ok { + return service, nil + } + return nil, fmt.Errorf("Received unexpected object: %v", o) +} + +func serviceSource(s *apiv1.Service) string { + return serviceSourceFromNamespaceAndName(s.Namespace, s.Name) +} + +func serviceSourceFromNamespaceAndName(namespace, name string) string { + return "svc/" + namespace + "/" + name +} + +const ( + serviceNameLabel = metaLabelPrefix + "service_name" + serviceLabelPrefix = metaLabelPrefix + "service_label_" + serviceAnnotationPrefix = metaLabelPrefix + "service_annotation_" + servicePortNameLabel = metaLabelPrefix + "service_port_name" + servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" +) + +func serviceLabels(svc *apiv1.Service) model.LabelSet { + ls := make(model.LabelSet, len(svc.Labels)+len(svc.Annotations)+2) + + ls[serviceNameLabel] = lv(svc.Name) + ls[namespaceLabel] = lv(svc.Namespace) + + for k, v := range svc.Labels { + ln := strutil.SanitizeLabelName(serviceLabelPrefix + k) + ls[model.LabelName(ln)] = lv(v) + } + + for k, v := range svc.Annotations { + ln := strutil.SanitizeLabelName(serviceAnnotationPrefix + k) + 
ls[model.LabelName(ln)] = lv(v) + } + return ls +} + +func (s *Service) buildService(svc *apiv1.Service) *targetgroup.Group { + tg := &targetgroup.Group{ + Source: serviceSource(svc), + } + tg.Labels = serviceLabels(svc) + + for _, port := range svc.Spec.Ports { + addr := net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", strconv.FormatInt(int64(port.Port), 10)) + + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: lv(addr), + servicePortNameLabel: lv(port.Name), + servicePortProtocolLabel: lv(string(port.Protocol)), + }) + } + + return tg +} diff --git a/src/prometheus/discovery/kubernetes/service_test.go b/src/prometheus/discovery/kubernetes/service_test.go new file mode 100644 index 0000000..e300c85 --- /dev/null +++ b/src/prometheus/discovery/kubernetes/service_test.go @@ -0,0 +1,204 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package kubernetes

import (
	"fmt"
	"testing"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/api/v1"
)

// makeMultiPortService returns a fixture service with two ports (TCP and UDP)
// plus a label and an annotation, used to exercise multi-target group building.
func makeMultiPortService() *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "testservice",
			Namespace:   "default",
			Labels:      map[string]string{"testlabel": "testvalue"},
			Annotations: map[string]string{"testannotation": "testannotationvalue"},
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{
					Name:     "testport0",
					Protocol: v1.ProtocolTCP,
					Port:     int32(30900),
				},
				{
					Name:     "testport1",
					Protocol: v1.ProtocolUDP,
					Port:     int32(30901),
				},
			},
		},
	}
}

// makeSuffixedService returns a single-port fixture service whose name is
// "testservice" plus the given suffix.
func makeSuffixedService(suffix string) *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("testservice%s", suffix),
			Namespace: "default",
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{
					Name:     "testport",
					Protocol: v1.ProtocolTCP,
					Port:     int32(30900),
				},
			},
		},
	}
}

// makeService returns the default single-port fixture service.
func makeService() *v1.Service {
	return makeSuffixedService("")
}

// TestServiceDiscoveryAdd verifies that creating a service produces a target
// group with one target addressed via the cluster DNS name and port.
func TestServiceDiscoveryAdd(t *testing.T) {
	n, c, w := makeDiscovery(RoleService, NamespaceDiscovery{})

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := makeService()
			c.CoreV1().Services(obj.Namespace).Create(obj)
			w.Services().Add(obj)
		},
		expectedMaxItems: 1,
		expectedRes: map[string]*targetgroup.Group{
			"svc/default/testservice": {
				Targets: []model.LabelSet{
					{
						"__meta_kubernetes_service_port_protocol": "TCP",
						"__address__":                             "testservice.default.svc:30900",
						"__meta_kubernetes_service_port_name":     "testport",
					},
				},
				Labels: model.LabelSet{
					"__meta_kubernetes_service_name": "testservice",
					"__meta_kubernetes_namespace":    "default",
				},
				Source: "svc/default/testservice",
			},
		},
	}.Run(t)
}

// TestServiceDiscoveryDelete verifies that deleting a service produces an
// empty target group for its source (so targets are dropped downstream).
func TestServiceDiscoveryDelete(t *testing.T) {
	n, c, w := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService())

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := makeService()
			c.CoreV1().Services(obj.Namespace).Delete(obj.Name, &metav1.DeleteOptions{})
			w.Services().Delete(obj)
		},
		expectedMaxItems: 2,
		expectedRes: map[string]*targetgroup.Group{
			"svc/default/testservice": {
				Source: "svc/default/testservice",
			},
		},
	}.Run(t)
}

// TestServiceDiscoveryUpdate verifies that updating a service to a multi-port
// spec yields one target per port with the new labels/annotations applied.
func TestServiceDiscoveryUpdate(t *testing.T) {
	n, c, w := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService())

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := makeMultiPortService()
			c.CoreV1().Services(obj.Namespace).Update(obj)
			w.Services().Modify(obj)
		},
		expectedMaxItems: 2,
		expectedRes: map[string]*targetgroup.Group{
			"svc/default/testservice": {
				Targets: []model.LabelSet{
					{
						"__meta_kubernetes_service_port_protocol": "TCP",
						"__address__":                             "testservice.default.svc:30900",
						"__meta_kubernetes_service_port_name":     "testport0",
					},
					{
						"__meta_kubernetes_service_port_protocol": "UDP",
						"__address__":                             "testservice.default.svc:30901",
						"__meta_kubernetes_service_port_name":     "testport1",
					},
				},
				Labels: model.LabelSet{
					"__meta_kubernetes_service_name":                      "testservice",
					"__meta_kubernetes_namespace":                         "default",
					"__meta_kubernetes_service_label_testlabel":           "testvalue",
					"__meta_kubernetes_service_annotation_testannotation": "testannotationvalue",
				},
				Source: "svc/default/testservice",
			},
		},
	}.Run(t)
}

// TestServiceDiscoveryNamespaces verifies that services are discovered in each
// of the configured namespaces.
func TestServiceDiscoveryNamespaces(t *testing.T) {
	n, c, w := makeDiscovery(RoleService, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			for _, ns := range []string{"ns1", "ns2"} {
				obj := makeService()
				obj.Namespace = ns
				c.CoreV1().Services(obj.Namespace).Create(obj)
				w.Services().Add(obj)
			}
		},
		expectedMaxItems: 2,
		expectedRes: map[string]*targetgroup.Group{
			"svc/ns1/testservice": {
				Targets: []model.LabelSet{
					{
						"__meta_kubernetes_service_port_protocol": "TCP",
						"__address__":                             "testservice.ns1.svc:30900",
						"__meta_kubernetes_service_port_name":     "testport",
					},
				},
				Labels: model.LabelSet{
					"__meta_kubernetes_service_name": "testservice",
					"__meta_kubernetes_namespace":    "ns1",
				},
				Source: "svc/ns1/testservice",
			},
			"svc/ns2/testservice": {
				Targets: []model.LabelSet{
					{
						"__meta_kubernetes_service_port_protocol": "TCP",
						"__address__":                             "testservice.ns2.svc:30900",
						"__meta_kubernetes_service_port_name":     "testport",
					},
				},
				Labels: model.LabelSet{
					"__meta_kubernetes_service_name": "testservice",
					"__meta_kubernetes_namespace":    "ns2",
				},
				Source: "svc/ns2/testservice",
			},
		},
	}.Run(t)
}
+ +package discovery + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + + sd_config "github.com/prometheus/prometheus/discovery/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + + "github.com/prometheus/prometheus/discovery/azure" + "github.com/prometheus/prometheus/discovery/consul" + "github.com/prometheus/prometheus/discovery/dns" + "github.com/prometheus/prometheus/discovery/ec2" + "github.com/prometheus/prometheus/discovery/file" + "github.com/prometheus/prometheus/discovery/gce" + "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/prometheus/prometheus/discovery/marathon" + "github.com/prometheus/prometheus/discovery/openstack" + "github.com/prometheus/prometheus/discovery/triton" + "github.com/prometheus/prometheus/discovery/zookeeper" +) + +// Discoverer provides information about target groups. It maintains a set +// of sources from which TargetGroups can originate. Whenever a discovery provider +// detects a potential change, it sends the TargetGroup through its channel. +// +// Discoverer does not know if an actual change happened. +// It does guarantee that it sends the new TargetGroup whenever a change happens. +// +// Discoverers should initially send a full set of all discoverable TargetGroups. +type Discoverer interface { + // Run hands a channel to the discovery provider(consul,dns etc) through which it can send + // updated target groups. + // Must returns if the context gets canceled. It should not close the update + // channel on returning. 
+ Run(ctx context.Context, up chan<- []*targetgroup.Group) +} + +type poolKey struct { + setName string + provider string +} + +// NewManager is the Discovery Manager constructor +func NewManager(ctx context.Context, logger log.Logger) *Manager { + return &Manager{ + logger: logger, + syncCh: make(chan map[string][]*targetgroup.Group), + targets: make(map[poolKey]map[string]*targetgroup.Group), + discoverCancel: []context.CancelFunc{}, + ctx: ctx, + } +} + +// Manager maintains a set of discovery providers and sends each update to a map channel. +// Targets are grouped by the target set name. +type Manager struct { + logger log.Logger + mtx sync.RWMutex + ctx context.Context + discoverCancel []context.CancelFunc + // Some Discoverers(eg. k8s) send only the updates for a given target group + // so we use map[tg.Source]*targetgroup.Group to know which group to update. + targets map[poolKey]map[string]*targetgroup.Group + // The sync channels sends the updates in map[targetSetName] where targetSetName is the job value from the scrape config. + syncCh chan map[string][]*targetgroup.Group + // True if updates were received in the last 5 seconds. + recentlyUpdated bool + // Protects recentlyUpdated. + recentlyUpdatedMtx sync.Mutex +} + +// Run starts the background processing +func (m *Manager) Run() error { + for range m.ctx.Done() { + m.cancelDiscoverers() + return m.ctx.Err() + } + return nil +} + +// SyncCh returns a read only channel used by all Discoverers to send target updates. +func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { + return m.syncCh +} + +// ApplyConfig removes all running discovery providers and starts new ones using the provided config. 
+func (m *Manager) ApplyConfig(cfg map[string]sd_config.ServiceDiscoveryConfig) error { + m.mtx.Lock() + defer m.mtx.Unlock() + + m.cancelDiscoverers() + for name, scfg := range cfg { + for provName, prov := range m.providersFromConfig(scfg) { + m.startProvider(m.ctx, poolKey{setName: name, provider: provName}, prov) + } + } + + return nil +} + +// StartCustomProvider is used for sdtool. Only use this if you know what you're doing. +func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker Discoverer) { + // Pool key for non-standard SD implementations are unknown. + poolKey := poolKey{setName: name, provider: name} + m.startProvider(ctx, poolKey, worker) +} + +func (m *Manager) startProvider(ctx context.Context, poolKey poolKey, worker Discoverer) { + ctx, cancel := context.WithCancel(ctx) + updates := make(chan []*targetgroup.Group) + + m.discoverCancel = append(m.discoverCancel, cancel) + + go worker.Run(ctx, updates) + go m.runProvider(ctx, poolKey, updates) + go m.runUpdater(ctx) +} + +func (m *Manager) runProvider(ctx context.Context, poolKey poolKey, updates chan []*targetgroup.Group) { + for { + select { + case <-ctx.Done(): + return + case tgs, ok := <-updates: + // Handle the case that a target provider exits and closes the channel + // before the context is done. 
+ if !ok { + return + } + m.updateGroup(poolKey, tgs) + m.recentlyUpdatedMtx.Lock() + m.recentlyUpdated = true + m.recentlyUpdatedMtx.Unlock() + } + } +} + +func (m *Manager) runUpdater(ctx context.Context) { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + m.recentlyUpdatedMtx.Lock() + if m.recentlyUpdated { + m.syncCh <- m.allGroups() + m.recentlyUpdated = false + } + m.recentlyUpdatedMtx.Unlock() + } + } +} + +func (m *Manager) cancelDiscoverers() { + for _, c := range m.discoverCancel { + c() + } + m.targets = make(map[poolKey]map[string]*targetgroup.Group) + m.discoverCancel = nil +} + +func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, tg := range tgs { + if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. + if _, ok := m.targets[poolKey]; !ok { + m.targets[poolKey] = make(map[string]*targetgroup.Group) + } + m.targets[poolKey][tg.Source] = tg + } + } +} + +func (m *Manager) allGroups() map[string][]*targetgroup.Group { + m.mtx.Lock() + defer m.mtx.Unlock() + + tSets := map[string][]*targetgroup.Group{} + for pkey, tsets := range m.targets { + for _, tg := range tsets { + // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' + // to signal that it needs to stop all scrape loops for this target set. 
+ tSets[pkey.setName] = append(tSets[pkey.setName], tg) + } + } + return tSets +} + +func (m *Manager) providersFromConfig(cfg sd_config.ServiceDiscoveryConfig) map[string]Discoverer { + providers := map[string]Discoverer{} + + app := func(mech string, i int, tp Discoverer) { + providers[fmt.Sprintf("%s/%d", mech, i)] = tp + } + + for i, c := range cfg.DNSSDConfigs { + app("dns", i, dns.NewDiscovery(*c, log.With(m.logger, "discovery", "dns"))) + } + for i, c := range cfg.FileSDConfigs { + app("file", i, file.NewDiscovery(c, log.With(m.logger, "discovery", "file"))) + } + for i, c := range cfg.ConsulSDConfigs { + k, err := consul.NewDiscovery(c, log.With(m.logger, "discovery", "consul")) + if err != nil { + level.Error(m.logger).Log("msg", "Cannot create Consul discovery", "err", err) + continue + } + app("consul", i, k) + } + for i, c := range cfg.MarathonSDConfigs { + t, err := marathon.NewDiscovery(*c, log.With(m.logger, "discovery", "marathon")) + if err != nil { + level.Error(m.logger).Log("msg", "Cannot create Marathon discovery", "err", err) + continue + } + app("marathon", i, t) + } + for i, c := range cfg.KubernetesSDConfigs { + k, err := kubernetes.New(log.With(m.logger, "discovery", "k8s"), c) + if err != nil { + level.Error(m.logger).Log("msg", "Cannot create Kubernetes discovery", "err", err) + continue + } + app("kubernetes", i, k) + } + for i, c := range cfg.ServersetSDConfigs { + app("serverset", i, zookeeper.NewServersetDiscovery(c, log.With(m.logger, "discovery", "zookeeper"))) + } + for i, c := range cfg.NerveSDConfigs { + app("nerve", i, zookeeper.NewNerveDiscovery(c, log.With(m.logger, "discovery", "nerve"))) + } + for i, c := range cfg.EC2SDConfigs { + app("ec2", i, ec2.NewDiscovery(c, log.With(m.logger, "discovery", "ec2"))) + } + for i, c := range cfg.OpenstackSDConfigs { + openstackd, err := openstack.NewDiscovery(c, log.With(m.logger, "discovery", "openstack")) + if err != nil { + level.Error(m.logger).Log("msg", "Cannot initialize 
OpenStack discovery", "err", err) + continue + } + app("openstack", i, openstackd) + } + + for i, c := range cfg.GCESDConfigs { + gced, err := gce.NewDiscovery(*c, log.With(m.logger, "discovery", "gce")) + if err != nil { + level.Error(m.logger).Log("msg", "Cannot initialize GCE discovery", "err", err) + continue + } + app("gce", i, gced) + } + for i, c := range cfg.AzureSDConfigs { + app("azure", i, azure.NewDiscovery(c, log.With(m.logger, "discovery", "azure"))) + } + for i, c := range cfg.TritonSDConfigs { + t, err := triton.New(log.With(m.logger, "discovery", "trition"), c) + if err != nil { + level.Error(m.logger).Log("msg", "Cannot create Triton discovery", "err", err) + continue + } + app("triton", i, t) + } + if len(cfg.StaticConfigs) > 0 { + app("static", 0, &StaticProvider{cfg.StaticConfigs}) + } + + return providers +} + +// StaticProvider holds a list of target groups that never change. +type StaticProvider struct { + TargetGroups []*targetgroup.Group +} + +// Run implements the Worker interface. +func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + // We still have to consider that the consumer exits right away in which case + // the context will be canceled. + select { + case ch <- sd.TargetGroups: + case <-ctx.Done(): + } + close(ch) +} diff --git a/src/prometheus/discovery/manager_test.go b/src/prometheus/discovery/manager_test.go new file mode 100644 index 0000000..19efb7d --- /dev/null +++ b/src/prometheus/discovery/manager_test.go @@ -0,0 +1,857 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package discovery

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"testing"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	sd_config "github.com/prometheus/prometheus/discovery/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"gopkg.in/yaml.v2"
)

// TestTargetUpdatesOrder checks that the target updates are received in the expected order.
func TestTargetUpdatesOrder(t *testing.T) {

	// The order in which the updates are sent is determined by the interval passed to the mock discovery adapter.
	// The final targets array is ordered alphabetically by the name of the discoverer.
	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
	testCases := []struct {
		title           string
		updates         map[string][]update
		expectedTargets [][]*targetgroup.Group
	}{
		{
			title: "Single TP no updates",
			updates: map[string][]update{
				"tp1": {},
			},
			expectedTargets: nil,
		},
		{
			title: "Multips TPs no updates",
			updates: map[string][]update{
				"tp1": {},
				"tp2": {},
				"tp3": {},
			},
			expectedTargets: nil,
		},
		{
			title: "Single TP empty initials",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{},
						interval:     5,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{},
			},
		},
		{
			title: "Multiple TPs empty initials",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{},
						interval:     5,
					},
				},
				"tp2": {
					{
						targetGroups: []targetgroup.Group{},
						interval:     200,
					},
				},
				"tp3": {
					{
						targetGroups: []targetgroup.Group{},
						interval:     100,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{},
				{},
				{},
			},
		},
		{
			title: "Single TP initials only",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "2"}},
							}},
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
				},
			},
		},
		{
			title: "Multiple TPs initials only",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "2"}},
							},
						},
					},
				},
				"tp2": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp2_group1",
								Targets: []model.LabelSet{{"__instance__": "3"}},
							},
						},
						interval: 10,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
				}, {
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
					{
						Source:  "tp2_group1",
						Targets: []model.LabelSet{{"__instance__": "3"}},
					},
				},
			},
		},
		{
			title: "Single TP initials followed by empty updates",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "2"}},
							},
						},
						interval: 0,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{},
							},
						},
						interval: 10,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{},
					},
				},
			},
		},
		{
			title: "Single TP initials and new groups",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "2"}},
							},
						},
						interval: 0,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "3"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "4"}},
							},
							{
								Source:  "tp1_group3",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
						},
						interval: 10,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "3"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "4"}},
					},
					{
						Source:  "tp1_group3",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
				},
			},
		},
		{
			title: "Multiple TPs initials and new groups",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "2"}},
							},
						},
						interval: 10,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group3",
								Targets: []model.LabelSet{{"__instance__": "3"}},
							},
							{
								Source:  "tp1_group4",
								Targets: []model.LabelSet{{"__instance__": "4"}},
							},
						},
						interval: 500,
					},
				},
				"tp2": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp2_group1",
								Targets: []model.LabelSet{{"__instance__": "5"}},
							},
							{
								Source:  "tp2_group2",
								Targets: []model.LabelSet{{"__instance__": "6"}},
							},
						},
						interval: 100,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp2_group3",
								Targets: []model.LabelSet{{"__instance__": "7"}},
							},
							{
								Source:  "tp2_group4",
								Targets: []model.LabelSet{{"__instance__": "8"}},
							},
						},
						interval: 10,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
					{
						Source:  "tp2_group1",
						Targets: []model.LabelSet{{"__instance__": "5"}},
					},
					{
						Source:  "tp2_group2",
						Targets: []model.LabelSet{{"__instance__": "6"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
					{
						Source:  "tp2_group1",
						Targets: []model.LabelSet{{"__instance__": "5"}},
					},
					{
						Source:  "tp2_group2",
						Targets: []model.LabelSet{{"__instance__": "6"}},
					},
					{
						Source:  "tp2_group3",
						Targets: []model.LabelSet{{"__instance__": "7"}},
					},
					{
						Source:  "tp2_group4",
						Targets: []model.LabelSet{{"__instance__": "8"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
					{
						Source:  "tp1_group3",
						Targets: []model.LabelSet{{"__instance__": "3"}},
					},
					{
						Source:  "tp1_group4",
						Targets: []model.LabelSet{{"__instance__": "4"}},
					},
					{
						Source:  "tp2_group1",
						Targets: []model.LabelSet{{"__instance__": "5"}},
					},
					{
						Source:  "tp2_group2",
						Targets: []model.LabelSet{{"__instance__": "6"}},
					},
					{
						Source:  "tp2_group3",
						Targets: []model.LabelSet{{"__instance__": "7"}},
					},
					{
						Source:  "tp2_group4",
						Targets: []model.LabelSet{{"__instance__": "8"}},
					},
				},
			},
		},
		{
			title: "One TP initials arrive after other TP updates.",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "2"}},
							},
						},
						interval: 10,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "3"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "4"}},
							},
						},
						interval: 150,
					},
				},
				"tp2": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp2_group1",
								Targets: []model.LabelSet{{"__instance__": "5"}},
							},
							{
								Source:  "tp2_group2",
								Targets: []model.LabelSet{{"__instance__": "6"}},
							},
						},
						interval: 200,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp2_group1",
								Targets: []model.LabelSet{{"__instance__": "7"}},
							},
							{
								Source:  "tp2_group2",
								Targets: []model.LabelSet{{"__instance__": "8"}},
							},
						},
						interval: 100,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "3"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "4"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "3"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "4"}},
					},
					{
						Source:  "tp2_group1",
						Targets: []model.LabelSet{{"__instance__": "5"}},
					},
					{
						Source:  "tp2_group2",
						Targets: []model.LabelSet{{"__instance__": "6"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "3"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "4"}},
					},
					{
						Source:  "tp2_group1",
						Targets: []model.LabelSet{{"__instance__": "7"}},
					},
					{
						Source:  "tp2_group2",
						Targets: []model.LabelSet{{"__instance__": "8"}},
					},
				},
			},
		},

		{
			title: "Single TP empty update in between",
			updates: map[string][]update{
				"tp1": {
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "1"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "2"}},
							},
						},
						interval: 30,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{},
							},
						},
						interval: 10,
					},
					{
						targetGroups: []targetgroup.Group{
							{
								Source:  "tp1_group1",
								Targets: []model.LabelSet{{"__instance__": "3"}},
							},
							{
								Source:  "tp1_group2",
								Targets: []model.LabelSet{{"__instance__": "4"}},
							},
						},
						interval: 300,
					},
				},
			},
			expectedTargets: [][]*targetgroup.Group{
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "1"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "2"}},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{},
					},
				},
				{
					{
						Source:  "tp1_group1",
						Targets: []model.LabelSet{{"__instance__": "3"}},
					},
					{
						Source:  "tp1_group2",
						Targets: []model.LabelSet{{"__instance__": "4"}},
					},
				},
			},
		},
	}

	for testIndex, testCase := range testCases {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		discoveryManager := NewManager(ctx, nil)

		var totalUpdatesCount int

		provUpdates := make(chan []*targetgroup.Group)
		for _, up := range testCase.updates {
			go newMockDiscoveryProvider(up).Run(ctx, provUpdates)
			if len(up) > 0 {
				totalUpdatesCount = totalUpdatesCount + len(up)
			}
		}

	Loop:
		for x := 0; x < totalUpdatesCount; x++ {
			select {
			case <-time.After(10 * time.Second):
				t.Errorf("%v. %q: no update arrived within the timeout limit", x, testCase.title)
				break Loop
			case tgs := <-provUpdates:
				discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(testIndex), provider: testCase.title}, tgs)
				for _, received := range discoveryManager.allGroups() {
					// Need to sort by the Groups source as the received order is not guaranteed.
					sort.Sort(byGroupSource(received))
					if !reflect.DeepEqual(received, testCase.expectedTargets[x]) {
						var receivedFormated string
						for _, receivedTargets := range received {
							receivedFormated = receivedFormated + receivedTargets.Source + ":" + fmt.Sprint(receivedTargets.Targets)
						}
						var expectedFormated string
						for _, expectedTargets := range testCase.expectedTargets[x] {
							expectedFormated = expectedFormated + expectedTargets.Source + ":" + fmt.Sprint(expectedTargets.Targets)
						}

						t.Errorf("%v. %v: \ntargets mismatch \nreceived: %v \nexpected: %v",
							x, testCase.title,
							receivedFormated,
							expectedFormated)
					}
				}
			}
		}
	}
}

// TestTargetSetRecreatesTargetGroupsEveryRun checks that applying a new config
// fully rebuilds the target pools: targets removed from the config must
// disappear from the manager's state after the next sync.
func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
	// verifyPresence asserts whether a target with the given label string is
	// (or is not) present under the given pool key.
	verifyPresence := func(tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) {
		if _, ok := tSets[poolKey]; !ok {
			t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets)
			return
		}

		match := false
		var mergedTargets string
		for _, targetGroup := range tSets[poolKey] {

			for _, l := range targetGroup.Targets {
				mergedTargets = mergedTargets + " " + l.String()
				if l.String() == label {
					match = true
				}
			}

		}
		if match != present {
			msg := ""
			if !present {
				msg = "not"
			}
			t.Fatalf("'%s' should %s be present in Targets labels: %v", label, msg, mergedTargets)
		}
	}

	cfg := &config.Config{}

	sOne := `
scrape_configs:
 - job_name: 'prometheus'
   static_configs:
   - targets: ["foo:9090"]
   - targets: ["bar:9090"]
`
	if err := yaml.UnmarshalStrict([]byte(sOne), cfg); err != nil {
		t.Fatalf("Unable to load YAML config sOne: %s", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	discoveryManager := NewManager(ctx, nil)
	go discoveryManager.Run()

	c := make(map[string]sd_config.ServiceDiscoveryConfig)
	for _, v := range cfg.ScrapeConfigs {
		c[v.JobName] = v.ServiceDiscoveryConfig
	}
	discoveryManager.ApplyConfig(c)

	<-discoveryManager.SyncCh()
	verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
	verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)

	sTwo := `
scrape_configs:
 - job_name: 'prometheus'
   static_configs:
   - targets: ["foo:9090"]
`
	if err := yaml.UnmarshalStrict([]byte(sTwo), cfg); err != nil {
		t.Fatalf("Unable to load YAML config sOne: %s", err)
	}
	c = make(map[string]sd_config.ServiceDiscoveryConfig)
	for _, v := range cfg.ScrapeConfigs {
		c[v.JobName] = v.ServiceDiscoveryConfig
	}
	discoveryManager.ApplyConfig(c)

	<-discoveryManager.SyncCh()
	verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
	verifyPresence(discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
}

// TestApplyConfigDoesNotModifyStaticProviderTargets checks that the manager
// treats the static configs it is handed as read-only.
func TestApplyConfigDoesNotModifyStaticProviderTargets(t *testing.T) {
	cfgText := `
scrape_configs:
 - job_name: 'prometheus'
   static_configs:
   - targets: ["foo:9090"]
   - targets: ["bar:9090"]
   - targets: ["baz:9090"]
`
	originalConfig := &config.Config{}
	if err := yaml.UnmarshalStrict([]byte(cfgText), originalConfig); err != nil {
		t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
	}
	origScrpCfg := originalConfig.ScrapeConfigs[0]

	processedConfig := &config.Config{}
	if err := yaml.UnmarshalStrict([]byte(cfgText), processedConfig); err != nil {
		t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	discoveryManager := NewManager(ctx, nil)
	go discoveryManager.Run()

	c := make(map[string]sd_config.ServiceDiscoveryConfig)
	for _, v := range processedConfig.ScrapeConfigs {
		c[v.JobName] = v.ServiceDiscoveryConfig
	}
	discoveryManager.ApplyConfig(c)
	<-discoveryManager.SyncCh()

	for _, sdcfg := range c {
		if !reflect.DeepEqual(origScrpCfg.ServiceDiscoveryConfig.StaticConfigs, sdcfg.StaticConfigs) {
			t.Fatalf("discovery manager modified static config \n expected: %v\n got: %v\n",
				origScrpCfg.ServiceDiscoveryConfig.StaticConfigs, sdcfg.StaticConfigs)
		}
	}
}

// update describes one batch of target groups a mock provider sends after
// sleeping for `interval` milliseconds.
type update struct {
	targetGroups []targetgroup.Group
	interval     time.Duration
}

// mockdiscoveryProvider is a Discoverer that replays a scripted sequence of
// updates on its channel.
type mockdiscoveryProvider struct {
	updates []update
	up      chan<- []*targetgroup.Group
}

func newMockDiscoveryProvider(updates []update) mockdiscoveryProvider {

	tp := mockdiscoveryProvider{
		updates: updates,
	}
	return tp
}

// Run implements the Discoverer interface.
// NOTE(review): tp is a value receiver, so assigning tp.up mutates only this
// call's copy; that is sufficient here because sendUpdates is invoked on the
// same copy.
func (tp mockdiscoveryProvider) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
	tp.up = up
	tp.sendUpdates()
}

// sendUpdates replays the scripted updates, sleeping interval milliseconds
// before each send.
func (tp mockdiscoveryProvider) sendUpdates() {
	for _, update := range tp.updates {

		time.Sleep(update.interval * time.Millisecond)

		tgs := make([]*targetgroup.Group, len(update.targetGroups))
		for i := range update.targetGroups {
			tgs[i] = &update.targetGroups[i]
		}
		tp.up <- tgs
	}
}

// byGroupSource implements sort.Interface so we can sort by the Source field.
type byGroupSource []*targetgroup.Group

func (a byGroupSource) Len() int           { return len(a) }
func (a byGroupSource) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source }
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package marathon + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + // metaLabelPrefix is the meta prefix used for all meta labels in this discovery. + metaLabelPrefix = model.MetaLabelPrefix + "marathon_" + // appLabelPrefix is the prefix for the application labels. + appLabelPrefix = metaLabelPrefix + "app_label_" + + // appLabel is used for the name of the app in Marathon. + appLabel model.LabelName = metaLabelPrefix + "app" + // imageLabel is the label that is used for the docker image running the service. + imageLabel model.LabelName = metaLabelPrefix + "image" + // portIndexLabel is the integer port index when multiple ports are defined; + // e.g. PORT1 would have a value of '1' + portIndexLabel model.LabelName = metaLabelPrefix + "port_index" + // taskLabel contains the mesos task name of the app instance. + taskLabel model.LabelName = metaLabelPrefix + "task" + + // portMappingLabelPrefix is the prefix for the application portMappings labels. + portMappingLabelPrefix = metaLabelPrefix + "port_mapping_label_" + // portDefinitionLabelPrefix is the prefix for the application portDefinitions labels. 
+ portDefinitionLabelPrefix = metaLabelPrefix + "port_definition_label_" + + // Constants for instrumentation. + namespace = "prometheus" +) + +var ( + refreshFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_marathon_refresh_failures_total", + Help: "The number of Marathon-SD refresh failures.", + }) + refreshDuration = prometheus.NewSummary( + prometheus.SummaryOpts{ + Namespace: namespace, + Name: "sd_marathon_refresh_duration_seconds", + Help: "The duration of a Marathon-SD refresh in seconds.", + }) + // DefaultSDConfig is the default Marathon SD configuration. + DefaultSDConfig = SDConfig{ + RefreshInterval: model.Duration(30 * time.Second), + } +) + +// SDConfig is the configuration for services running on Marathon. +type SDConfig struct { + Servers []string `yaml:"servers,omitempty"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + AuthToken config_util.Secret `yaml:"auth_token,omitempty"` + AuthTokenFile string `yaml:"auth_token_file,omitempty"` + HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if len(c.Servers) == 0 { + return fmt.Errorf("marathon_sd: must contain at least one Marathon server") + } + if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 { + return fmt.Errorf("marathon_sd: at most one of auth_token & auth_token_file must be configured") + } + if c.HTTPClientConfig.BasicAuth != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { + return fmt.Errorf("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured") + } + if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { + return fmt.Errorf("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") + } + if err := c.HTTPClientConfig.Validate(); err != nil { + return err + } + + return nil +} + +func init() { + prometheus.MustRegister(refreshFailuresCount) + prometheus.MustRegister(refreshDuration) +} + +const appListPath string = "/v2/apps/?embed=apps.tasks" + +// Discovery provides service discovery based on a Marathon instance. +type Discovery struct { + client *http.Client + servers []string + refreshInterval time.Duration + lastRefresh map[string]*targetgroup.Group + appsClient AppListClient + logger log.Logger +} + +// NewDiscovery returns a new Marathon Discovery. 
+func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { + if logger == nil { + logger = log.NewNopLogger() + } + + rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd") + if err != nil { + return nil, err + } + + if len(conf.AuthToken) > 0 { + rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt) + } else if len(conf.AuthTokenFile) > 0 { + rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt) + } + if err != nil { + return nil, err + } + + return &Discovery{ + client: &http.Client{Transport: rt}, + servers: conf.Servers, + refreshInterval: time.Duration(conf.RefreshInterval), + appsClient: fetchApps, + logger: logger, + }, nil +} + +type authTokenRoundTripper struct { + authToken config_util.Secret + rt http.RoundTripper +} + +// newAuthTokenRoundTripper adds the provided auth token to a request. +func newAuthTokenRoundTripper(token config_util.Secret, rt http.RoundTripper) (http.RoundTripper, error) { + return &authTokenRoundTripper{token, rt}, nil +} + +func (rt *authTokenRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + // According to https://docs.mesosphere.com/1.11/security/oss/managing-authentication/ + // DC/OS wants with "token=" a different Authorization header than implemented in httputil/client.go + // so we set this explicitly here. + request.Header.Set("Authorization", "token="+string(rt.authToken)) + + return rt.rt.RoundTrip(request) +} + +type authTokenFileRoundTripper struct { + authTokenFile string + rt http.RoundTripper +} + +// newAuthTokenFileRoundTripper adds the auth token read from the file to a request. +func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { + // fail-fast if we can't read the file. 
+ _, err := ioutil.ReadFile(tokenFile) + if err != nil { + return nil, fmt.Errorf("unable to read auth token file %s: %s", tokenFile, err) + } + return &authTokenFileRoundTripper{tokenFile, rt}, nil +} + +func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + b, err := ioutil.ReadFile(rt.authTokenFile) + if err != nil { + return nil, fmt.Errorf("unable to read auth token file %s: %s", rt.authTokenFile, err) + } + authToken := strings.TrimSpace(string(b)) + + // According to https://docs.mesosphere.com/1.11/security/oss/managing-authentication/ + // DC/OS wants with "token=" a different Authorization header than implemented in httputil/client.go + // so we set this explicitly here. + request.Header.Set("Authorization", "token="+authToken) + return rt.rt.RoundTrip(request) +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + for { + select { + case <-ctx.Done(): + return + case <-time.After(d.refreshInterval): + err := d.updateServices(ctx, ch) + if err != nil { + level.Error(d.logger).Log("msg", "Error while updating services", "err", err) + } + } + } +} + +func (d *Discovery) updateServices(ctx context.Context, ch chan<- []*targetgroup.Group) (err error) { + t0 := time.Now() + defer func() { + refreshDuration.Observe(time.Since(t0).Seconds()) + if err != nil { + refreshFailuresCount.Inc() + } + }() + + targetMap, err := d.fetchTargetGroups() + if err != nil { + return err + } + + all := make([]*targetgroup.Group, 0, len(targetMap)) + for _, tg := range targetMap { + all = append(all, tg) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- all: + } + + // Remove services which did disappear. 
+ for source := range d.lastRefresh { + _, ok := targetMap[source] + if !ok { + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- []*targetgroup.Group{{Source: source}}: + level.Debug(d.logger).Log("msg", "Removing group", "source", source) + } + } + } + + d.lastRefresh = targetMap + return nil +} + +func (d *Discovery) fetchTargetGroups() (map[string]*targetgroup.Group, error) { + url := RandomAppsURL(d.servers) + apps, err := d.appsClient(d.client, url) + if err != nil { + return nil, err + } + + groups := AppsToTargetGroups(apps) + return groups, nil +} + +// Task describes one instance of a service running on Marathon. +type Task struct { + ID string `json:"id"` + Host string `json:"host"` + Ports []uint32 `json:"ports"` +} + +// PortMappings describes in which port the process are binding inside the docker container. +type PortMappings struct { + Labels map[string]string `json:"labels"` +} + +// DockerContainer describes a container which uses the docker runtime. +type DockerContainer struct { + Image string `json:"image"` + PortMappings []PortMappings `json:"portMappings"` +} + +// Container describes the runtime an app in running in. +type Container struct { + Docker DockerContainer `json:"docker"` + PortMappings []PortMappings `json:"portMappings"` +} + +// PortDefinitions describes which load balancer port you should access to access the service. +type PortDefinitions struct { + Labels map[string]string `json:"labels"` +} + +// App describes a service running on Marathon. +type App struct { + ID string `json:"id"` + Tasks []Task `json:"tasks"` + RunningTasks int `json:"tasksRunning"` + Labels map[string]string `json:"labels"` + Container Container `json:"container"` + PortDefinitions []PortDefinitions `json:"portDefinitions"` +} + +// AppList is a list of Marathon apps. +type AppList struct { + Apps []App `json:"apps"` +} + +// AppListClient defines a function that can be used to get an application list from marathon. 
+type AppListClient func(client *http.Client, url string) (*AppList, error) + +// fetchApps requests a list of applications from a marathon server. +func fetchApps(client *http.Client, url string) (*AppList, error) { + request, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + resp, err := client.Do(request) + if err != nil { + return nil, err + } + + if (resp.StatusCode < 200) || (resp.StatusCode >= 300) { + return nil, fmt.Errorf("Non 2xx status '%v' response during marathon service discovery", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + apps, err := parseAppJSON(body) + if err != nil { + return nil, fmt.Errorf("%v in %s", err, url) + } + return apps, nil +} + +func parseAppJSON(body []byte) (*AppList, error) { + apps := &AppList{} + err := json.Unmarshal(body, apps) + if err != nil { + return nil, err + } + return apps, nil +} + +// RandomAppsURL randomly selects a server from an array and creates +// an URL pointing to the app list. +func RandomAppsURL(servers []string) string { + // TODO: If possible update server list from Marathon at some point. + server := servers[rand.Intn(len(servers))] + return fmt.Sprintf("%s%s", server, appListPath) +} + +// AppsToTargetGroups takes an array of Marathon apps and converts them into target groups. 
+func AppsToTargetGroups(apps *AppList) map[string]*targetgroup.Group { + tgroups := map[string]*targetgroup.Group{} + for _, a := range apps.Apps { + group := createTargetGroup(&a) + tgroups[group.Source] = group + } + return tgroups +} + +func createTargetGroup(app *App) *targetgroup.Group { + var ( + targets = targetsForApp(app) + appName = model.LabelValue(app.ID) + image = model.LabelValue(app.Container.Docker.Image) + ) + tg := &targetgroup.Group{ + Targets: targets, + Labels: model.LabelSet{ + appLabel: appName, + imageLabel: image, + }, + Source: app.ID, + } + + for ln, lv := range app.Labels { + ln = appLabelPrefix + strutil.SanitizeLabelName(ln) + tg.Labels[model.LabelName(ln)] = model.LabelValue(lv) + } + + return tg +} + +func targetsForApp(app *App) []model.LabelSet { + targets := make([]model.LabelSet, 0, len(app.Tasks)) + for _, t := range app.Tasks { + if len(t.Ports) == 0 { + continue + } + for i := 0; i < len(t.Ports); i++ { + targetAddress := targetForTask(&t, i) + target := model.LabelSet{ + model.AddressLabel: model.LabelValue(targetAddress), + taskLabel: model.LabelValue(t.ID), + portIndexLabel: model.LabelValue(strconv.Itoa(i)), + } + if i < len(app.PortDefinitions) { + for ln, lv := range app.PortDefinitions[i].Labels { + ln = portDefinitionLabelPrefix + strutil.SanitizeLabelName(ln) + target[model.LabelName(ln)] = model.LabelValue(lv) + } + } + // Prior to Marathon 1.5 the port mappings could be found at the path + // "container.docker.portMappings". When support for Marathon 1.4 + // is dropped then this section of code can be removed. + if i < len(app.Container.Docker.PortMappings) { + for ln, lv := range app.Container.Docker.PortMappings[i].Labels { + ln = portMappingLabelPrefix + strutil.SanitizeLabelName(ln) + target[model.LabelName(ln)] = model.LabelValue(lv) + } + } + // In Marathon 1.5.x the container.docker.portMappings object was moved + // to container.portMappings. 
+ if i < len(app.Container.PortMappings) { + for ln, lv := range app.Container.PortMappings[i].Labels { + ln = portMappingLabelPrefix + strutil.SanitizeLabelName(ln) + target[model.LabelName(ln)] = model.LabelValue(lv) + } + } + targets = append(targets, target) + } + } + return targets +} + +func targetForTask(task *Task, index int) string { + return net.JoinHostPort(task.Host, fmt.Sprintf("%d", task.Ports[index])) +} diff --git a/src/prometheus/discovery/marathon/marathon_test.go b/src/prometheus/discovery/marathon/marathon_test.go new file mode 100644 index 0000000..5ad978e --- /dev/null +++ b/src/prometheus/discovery/marathon/marathon_test.go @@ -0,0 +1,577 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package marathon + +import ( + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +var ( + marathonValidLabel = map[string]string{"prometheus": "yes"} + testServers = []string{"http://localhost:8080"} + conf = SDConfig{Servers: testServers} +) + +func testUpdateServices(client AppListClient, ch chan []*targetgroup.Group) error { + md, err := NewDiscovery(conf, nil) + if err != nil { + return err + } + md.appsClient = client + return md.updateServices(context.Background(), ch) +} + +func TestMarathonSDHandleError(t *testing.T) { + var ( + errTesting = errors.New("testing failure") + ch = make(chan []*targetgroup.Group, 1) + client = func(client *http.Client, url string) (*AppList, error) { return nil, errTesting } + ) + if err := testUpdateServices(client, ch); err != errTesting { + t.Fatalf("Expected error: %s", err) + } + select { + case tg := <-ch: + t.Fatalf("Got group: %s", tg) + default: + } +} + +func TestMarathonSDEmptyList(t *testing.T) { + var ( + ch = make(chan []*targetgroup.Group, 1) + client = func(client *http.Client, url string) (*AppList, error) { return &AppList{}, nil } + ) + if err := testUpdateServices(client, ch); err != nil { + t.Fatalf("Got error: %s", err) + } + select { + case tg := <-ch: + if len(tg) > 0 { + t.Fatalf("Got group: %v", tg) + } + default: + } +} + +func marathonTestAppList(labels map[string]string, runningTasks int) *AppList { + var ( + task = Task{ + ID: "test-task-1", + Host: "mesos-slave1", + Ports: []uint32{31000}, + } + docker = DockerContainer{ + Image: "repo/image:tag", + PortMappings: []PortMappings{ + {Labels: labels}, + }, + } + container = Container{Docker: docker} + app = App{ + ID: "test-service", + Tasks: []Task{task}, + RunningTasks: runningTasks, + Labels: labels, + Container: container, + PortDefinitions: []PortDefinitions{ + {Labels: make(map[string]string)}, + }, + } + 
) + return &AppList{ + Apps: []App{app}, + } +} + +func TestMarathonSDSendGroup(t *testing.T) { + var ( + ch = make(chan []*targetgroup.Group, 1) + client = func(client *http.Client, url string) (*AppList, error) { + return marathonTestAppList(marathonValidLabel, 1), nil + } + ) + if err := testUpdateServices(client, ch); err != nil { + t.Fatalf("Got error: %s", err) + } + select { + case tgs := <-ch: + tg := tgs[0] + + if tg.Source != "test-service" { + t.Fatalf("Wrong target group name: %s", tg.Source) + } + if len(tg.Targets) != 1 { + t.Fatalf("Wrong number of targets: %v", tg.Targets) + } + tgt := tg.Targets[0] + if tgt[model.AddressLabel] != "mesos-slave1:31000" { + t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" { + t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { + t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) + } + default: + t.Fatal("Did not get a target group.") + } +} + +func TestMarathonSDRemoveApp(t *testing.T) { + var ch = make(chan []*targetgroup.Group, 1) + md, err := NewDiscovery(conf, nil) + if err != nil { + t.Fatalf("%s", err) + } + + md.appsClient = func(client *http.Client, url string) (*AppList, error) { + return marathonTestAppList(marathonValidLabel, 1), nil + } + if err := md.updateServices(context.Background(), ch); err != nil { + t.Fatalf("Got error on first update: %s", err) + } + up1 := (<-ch)[0] + + md.appsClient = func(client *http.Client, url string) (*AppList, error) { + return marathonTestAppList(marathonValidLabel, 0), nil + } + if err := md.updateServices(context.Background(), ch); err != nil { + t.Fatalf("Got error on second update: %s", err) + } + up2 := (<-ch)[0] + + if up2.Source != up1.Source { + t.Fatalf("Source is different: %s", up2) + if len(up2.Targets) > 0 { + 
t.Fatalf("Got a non-empty target set: %s", up2.Targets) + } + } +} + +func TestMarathonSDRunAndStop(t *testing.T) { + var ( + refreshInterval = model.Duration(time.Millisecond * 10) + conf = SDConfig{Servers: testServers, RefreshInterval: refreshInterval} + ch = make(chan []*targetgroup.Group) + doneCh = make(chan error) + ) + md, err := NewDiscovery(conf, nil) + if err != nil { + t.Fatalf("%s", err) + } + md.appsClient = func(client *http.Client, url string) (*AppList, error) { + return marathonTestAppList(marathonValidLabel, 1), nil + } + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + md.Run(ctx, ch) + close(doneCh) + }() + + timeout := time.After(md.refreshInterval * 3) + for { + select { + case <-ch: + cancel() + case <-doneCh: + return + case <-timeout: + t.Fatalf("Update took too long.") + } + } +} + +func marathonTestAppListWithMutiplePorts(labels map[string]string, runningTasks int) *AppList { + var ( + task = Task{ + ID: "test-task-1", + Host: "mesos-slave1", + Ports: []uint32{31000, 32000}, + } + docker = DockerContainer{ + Image: "repo/image:tag", + PortMappings: []PortMappings{ + {Labels: labels}, + {Labels: make(map[string]string)}, + }, + } + container = Container{Docker: docker} + app = App{ + ID: "test-service", + Tasks: []Task{task}, + RunningTasks: runningTasks, + Labels: labels, + Container: container, + PortDefinitions: []PortDefinitions{ + {Labels: make(map[string]string)}, + {Labels: labels}, + }, + } + ) + return &AppList{ + Apps: []App{app}, + } +} + +func TestMarathonSDSendGroupWithMutiplePort(t *testing.T) { + var ( + ch = make(chan []*targetgroup.Group, 1) + client = func(client *http.Client, url string) (*AppList, error) { + return marathonTestAppListWithMutiplePorts(marathonValidLabel, 1), nil + } + ) + if err := testUpdateServices(client, ch); err != nil { + t.Fatalf("Got error: %s", err) + } + select { + case tgs := <-ch: + tg := tgs[0] + + if tg.Source != "test-service" { + t.Fatalf("Wrong target group 
name: %s", tg.Source) + } + if len(tg.Targets) != 2 { + t.Fatalf("Wrong number of targets: %v", tg.Targets) + } + tgt := tg.Targets[0] + if tgt[model.AddressLabel] != "mesos-slave1:31000" { + t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" { + t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { + t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) + } + tgt = tg.Targets[1] + if tgt[model.AddressLabel] != "mesos-slave1:32000" { + t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { + t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "yes" { + t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) + } + default: + t.Fatal("Did not get a target group.") + } +} + +func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int) *AppList { + var ( + task = Task{ + ID: "test-task-2", + Host: "mesos-slave-2", + Ports: []uint32{}, + } + docker = DockerContainer{Image: "repo/image:tag"} + container = Container{Docker: docker} + app = App{ + ID: "test-service-zero-ports", + Tasks: []Task{task}, + RunningTasks: runningTasks, + Labels: labels, + Container: container, + } + ) + return &AppList{ + Apps: []App{app}, + } +} + +func TestMarathonZeroTaskPorts(t *testing.T) { + var ( + ch = make(chan []*targetgroup.Group, 1) + client = func(client *http.Client, url string) (*AppList, error) { + return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil + } + ) + if err := testUpdateServices(client, ch); err != nil { + t.Fatalf("Got error: %s", err) + } + select { + case tgs := <-ch: + tg 
:= tgs[0] + + if tg.Source != "test-service-zero-ports" { + t.Fatalf("Wrong target group name: %s", tg.Source) + } + if len(tg.Targets) != 0 { + t.Fatalf("Wrong number of targets: %v", tg.Targets) + } + default: + t.Fatal("Did not get a target group.") + } +} + +func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) { + var ( + ch = make(chan []*targetgroup.Group, 1) + client = fetchApps + ) + // Simulate 500 error with a valid JSON response. + respHandler := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{}`) + } + // Create a test server with mock HTTP handler. + ts := httptest.NewServer(http.HandlerFunc(respHandler)) + defer ts.Close() + // Backup conf for future tests. + backupConf := conf + defer func() { + conf = backupConf + }() + // Setup conf for the test case. + conf = SDConfig{Servers: []string{ts.URL}} + // Execute test case and validate behaviour. + if err := testUpdateServices(client, ch); err == nil { + t.Fatalf("Expected error for 5xx HTTP response from marathon server") + } +} + +func marathonTestAppListWithoutPortMappings(labels map[string]string, runningTasks int) *AppList { + var ( + task = Task{ + ID: "test-task-1", + Host: "mesos-slave1", + Ports: []uint32{31000, 32000}, + } + docker = DockerContainer{ + Image: "repo/image:tag", + } + container = Container{Docker: docker} + app = App{ + ID: "test-service", + Tasks: []Task{task}, + RunningTasks: runningTasks, + Labels: labels, + Container: container, + PortDefinitions: []PortDefinitions{ + {Labels: make(map[string]string)}, + {Labels: labels}, + }, + } + ) + return &AppList{ + Apps: []App{app}, + } +} + +func TestMarathonSDSendGroupWithoutPortMappings(t *testing.T) { + var ( + ch = make(chan []*targetgroup.Group, 1) + client = func(client *http.Client, url string) (*AppList, error) { + return marathonTestAppListWithoutPortMappings(marathonValidLabel, 1), nil + } + ) + 
if err := testUpdateServices(client, ch); err != nil { + t.Fatalf("Got error: %s", err) + } + select { + case tgs := <-ch: + tg := tgs[0] + + if tg.Source != "test-service" { + t.Fatalf("Wrong target group name: %s", tg.Source) + } + if len(tg.Targets) != 2 { + t.Fatalf("Wrong number of targets: %v", tg.Targets) + } + tgt := tg.Targets[0] + if tgt[model.AddressLabel] != "mesos-slave1:31000" { + t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { + t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" { + t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel]) + } + tgt = tg.Targets[1] + if tgt[model.AddressLabel] != "mesos-slave1:32000" { + t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" { + t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) + } + if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "yes" { + t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel]) + } + default: + t.Fatal("Did not get a target group.") + } +} + +func marathonTestAppListWithoutPortDefinitions(labels map[string]string, runningTasks int) *AppList { + var ( + task = Task{ + ID: "test-task-1", + Host: "mesos-slave1", + Ports: []uint32{31000, 32000}, + } + docker = DockerContainer{ + Image: "repo/image:tag", + PortMappings: []PortMappings{ + {Labels: labels}, + {Labels: make(map[string]string)}, + }, + } + container = Container{Docker: docker} + app = App{ + ID: "test-service", + Tasks: []Task{task}, + RunningTasks: runningTasks, + Labels: labels, + Container: container, + } + ) + return &AppList{ + Apps: []App{app}, + } +} + +func TestMarathonSDSendGroupWithoutPortDefinitions(t 
*testing.T) {
+	var (
+		ch     = make(chan []*targetgroup.Group, 1)
+		client = func(client *http.Client, url string) (*AppList, error) {
+			return marathonTestAppListWithoutPortDefinitions(marathonValidLabel, 1), nil
+		}
+	)
+	if err := testUpdateServices(client, ch); err != nil {
+		t.Fatalf("Got error: %s", err)
+	}
+	select {
+	case tgs := <-ch:
+		tg := tgs[0]
+
+		if tg.Source != "test-service" {
+			t.Fatalf("Wrong target group name: %s", tg.Source)
+		}
+		if len(tg.Targets) != 2 {
+			t.Fatalf("Wrong number of targets: %v", tg.Targets)
+		}
+		tgt := tg.Targets[0]
+		if tgt[model.AddressLabel] != "mesos-slave1:31000" {
+			t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+		}
+		if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" {
+			t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel])
+		}
+		if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+			t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel])
+		}
+		tgt = tg.Targets[1]
+		if tgt[model.AddressLabel] != "mesos-slave1:32000" {
+			t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+		}
+		if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" {
+			t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
+		}
+		if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+			t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel])
+		}
+	default:
+		t.Fatal("Did not get a target group.")
+	}
+}
+
+// marathonTestAppListWithContainerPortMappings builds an AppList whose single
+// app declares its ports via container port mappings (no portDefinitions):
+// the first mapping carries the given labels, the second carries none.
+func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *AppList {
+	var (
+		task = Task{
+			ID:    "test-task-1",
+			Host:  "mesos-slave1",
+			Ports: []uint32{31000, 32000},
+		}
+		docker = DockerContainer{
+			Image: "repo/image:tag",
+		}
+		container = Container{
+			Docker: docker,
+			PortMappings: []PortMappings{
+				{Labels: labels},
+				{Labels: make(map[string]string)},
+			},
+		}
+		app = App{
+			ID:           "test-service",
+			Tasks:        []Task{task},
+			RunningTasks: runningTasks,
+			Labels:       labels,
+			Container:    container,
+		}
+	)
+	return &AppList{
+		Apps: []App{app},
+	}
+}
+
+// TestMarathonSDSendGroupWithContainerPortMappings checks that port-mapping
+// labels are attached only to the target for the port they belong to.
+func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
+	var (
+		ch     = make(chan []*targetgroup.Group, 1)
+		client = func(client *http.Client, url string) (*AppList, error) {
+			return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
+		}
+	)
+	if err := testUpdateServices(client, ch); err != nil {
+		t.Fatalf("Got error: %s", err)
+	}
+	select {
+	case tgs := <-ch:
+		tg := tgs[0]
+
+		if tg.Source != "test-service" {
+			t.Fatalf("Wrong target group name: %s", tg.Source)
+		}
+		if len(tg.Targets) != 2 {
+			t.Fatalf("Wrong number of targets: %v", tg.Targets)
+		}
+		tgt := tg.Targets[0]
+		if tgt[model.AddressLabel] != "mesos-slave1:31000" {
+			t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+		}
+		// Report the label under test, not the address, on failure.
+		if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" {
+			t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.LabelName(portMappingLabelPrefix+"prometheus")])
+		}
+		if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+			t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")])
+		}
+		tgt = tg.Targets[1]
+		if tgt[model.AddressLabel] != "mesos-slave1:32000" {
+			t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+		}
+		if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" {
+			t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.LabelName(portMappingLabelPrefix+"prometheus")])
+		}
+		if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+			t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")])
+		}
+	default:
+		t.Fatal("Did not get a target group.")
+	}
+}
diff --git a/src/prometheus/discovery/openstack/hypervisor.go b/src/prometheus/discovery/openstack/hypervisor.go
new file mode 100644
index 0000000..360eedb
--- /dev/null
+++
b/src/prometheus/discovery/openstack/hypervisor.go @@ -0,0 +1,145 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openstack + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + "github.com/gophercloud/gophercloud/pagination" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + openstackLabelHypervisorHostIP = openstackLabelPrefix + "hypervisor_host_ip" + openstackLabelHypervisorHostName = openstackLabelPrefix + "hypervisor_hostname" + openstackLabelHypervisorStatus = openstackLabelPrefix + "hypervisor_status" + openstackLabelHypervisorState = openstackLabelPrefix + "hypervisor_state" + openstackLabelHypervisorType = openstackLabelPrefix + "hypervisor_type" +) + +// HypervisorDiscovery discovers OpenStack hypervisors. +type HypervisorDiscovery struct { + authOpts *gophercloud.AuthOptions + region string + interval time.Duration + logger log.Logger + port int +} + +// NewHypervisorDiscovery returns a new hypervisor discovery. 
+func NewHypervisorDiscovery(opts *gophercloud.AuthOptions,
+	interval time.Duration, port int, region string, l log.Logger) *HypervisorDiscovery {
+	return &HypervisorDiscovery{authOpts: opts,
+		region: region, interval: interval, port: port, logger: l}
+}
+
+// Run implements the Discoverer interface.
+func (h *HypervisorDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+	// Get an initial set right away.
+	tg, err := h.refresh()
+	if err != nil {
+		level.Error(h.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+	} else {
+		select {
+		case ch <- []*targetgroup.Group{tg}:
+		case <-ctx.Done():
+			return
+		}
+	}
+
+	ticker := time.NewTicker(h.interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			tg, err := h.refresh()
+			if err != nil {
+				level.Error(h.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+				continue
+			}
+
+			select {
+			case ch <- []*targetgroup.Group{tg}:
+			case <-ctx.Done():
+				return
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// refresh authenticates against OpenStack and lists all hypervisors,
+// returning one target group with one target per hypervisor host IP.
+func (h *HypervisorDiscovery) refresh() (*targetgroup.Group, error) {
+	var err error
+	t0 := time.Now()
+	defer func() {
+		refreshDuration.Observe(time.Since(t0).Seconds())
+		if err != nil {
+			refreshFailuresCount.Inc()
+		}
+	}()
+
+	provider, err := openstack.AuthenticatedClient(*h.authOpts)
+	if err != nil {
+		return nil, fmt.Errorf("could not create OpenStack session: %s", err)
+	}
+	client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+		Region: h.region,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not create OpenStack compute session: %s", err)
+	}
+
+	tg := &targetgroup.Group{
+		Source: "OS_" + h.region,
+	}
+	// OpenStack API reference
+	// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
+	pagerHypervisors := hypervisors.List(client)
+	err = pagerHypervisors.EachPage(func(page pagination.Page) (bool, error) {
+		hypervisorList, err := hypervisors.ExtractHypervisors(page)
+		if err != nil {
+			return false, fmt.Errorf("could not extract hypervisors: %s", err)
+		}
+		for _, hypervisor := range hypervisorList {
+			// One target per hypervisor, addressed by host IP and the configured port.
+			addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port))
+			labels := model.LabelSet{
+				model.AddressLabel:               model.LabelValue(addr),
+				openstackLabelHypervisorHostIP:   model.LabelValue(hypervisor.HostIP),
+				openstackLabelHypervisorHostName: model.LabelValue(hypervisor.HypervisorHostname),
+				openstackLabelHypervisorStatus:   model.LabelValue(hypervisor.Status),
+				openstackLabelHypervisorState:    model.LabelValue(hypervisor.State),
+				openstackLabelHypervisorType:     model.LabelValue(hypervisor.HypervisorType),
+			}
+			tg.Targets = append(tg.Targets, labels)
+		}
+		return true, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return tg, nil
+}
diff --git a/src/prometheus/discovery/openstack/hypervisor_test.go b/src/prometheus/discovery/openstack/hypervisor_test.go
new file mode 100644
index 0000000..8bcadce
--- /dev/null
+++ b/src/prometheus/discovery/openstack/hypervisor_test.go
@@ -0,0 +1,82 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openstack
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/prometheus/common/model"
+)
+
+// OpenstackSDHypervisorTestSuite runs hypervisor discovery against the
+// OpenStack HTTP mock (see mock.go).
+type OpenstackSDHypervisorTestSuite struct {
+	suite.Suite
+	Mock *SDMock
+}
+
+func (s *OpenstackSDHypervisorTestSuite) TearDownSuite() {
+	s.Mock.ShutdownServer()
+}
+
+func (s *OpenstackSDHypervisorTestSuite) SetupTest() {
+	s.Mock = NewSDMock(s.T())
+	s.Mock.Setup()
+
+	s.Mock.HandleHypervisorListSuccessfully()
+	s.Mock.HandleVersionsSuccessfully()
+	s.Mock.HandleAuthSuccessfully()
+}
+
+func TestOpenstackSDHypervisorSuite(t *testing.T) {
+	suite.Run(t, new(OpenstackSDHypervisorTestSuite))
+}
+
+// openstackAuthSuccess builds a hypervisor-role discovery pointed at the mock.
+func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (Discovery, error) {
+	conf := SDConfig{
+		IdentityEndpoint: s.Mock.Endpoint(),
+		Password:         "test",
+		Username:         "test",
+		DomainName:       "12345",
+		Region:           "RegionOne",
+		Role:             "hypervisor",
+	}
+	return NewDiscovery(&conf, nil)
+}
+
+func (s *OpenstackSDHypervisorTestSuite) TestOpenstackSDHypervisorRefresh() {
+	// Fail fast if discovery construction itself errored instead of
+	// dereferencing a nil Discovery below.
+	hypervisor, err := s.openstackAuthSuccess()
+	require.NoError(s.T(), err)
+	tg, err := hypervisor.refresh()
+	assert.Nil(s.T(), err)
+	require.NotNil(s.T(), tg)
+	require.NotNil(s.T(), tg.Targets)
+	require.Len(s.T(), tg.Targets, 2)
+
+	assert.Equal(s.T(), tg.Targets[0]["__address__"], model.LabelValue("172.16.70.14:0"))
+	assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_hypervisor_hostname"], model.LabelValue("nc14.cloud.com"))
+	assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_hypervisor_type"], model.LabelValue("QEMU"))
+	assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_hypervisor_host_ip"], model.LabelValue("172.16.70.14"))
+	assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_hypervisor_state"], model.LabelValue("up"))
+	assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_hypervisor_status"], model.LabelValue("enabled"))
+
+	assert.Equal(s.T(), tg.Targets[1]["__address__"], model.LabelValue("172.16.70.13:0"))
+	assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_hypervisor_hostname"], model.LabelValue("cc13.cloud.com"))
+	assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_hypervisor_type"], model.LabelValue("QEMU"))
+	assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_hypervisor_host_ip"], model.LabelValue("172.16.70.13"))
+	assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_hypervisor_state"], model.LabelValue("up"))
+	assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_hypervisor_status"], model.LabelValue("enabled"))
+}
diff --git a/src/prometheus/discovery/openstack/instance.go b/src/prometheus/discovery/openstack/instance.go
new file mode 100644
index 0000000..f588f65
--- /dev/null
+++ b/src/prometheus/discovery/openstack/instance.go
@@ -0,0 +1,215 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openstack
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack"
+	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
+	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
+	"github.com/gophercloud/gophercloud/pagination"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/util/strutil"
+)
+
+const (
+	openstackLabelPrefix         = model.MetaLabelPrefix + "openstack_"
+	openstackLabelInstanceID     = openstackLabelPrefix + "instance_id"
+	openstackLabelInstanceName   = openstackLabelPrefix + "instance_name"
+	openstackLabelInstanceStatus = openstackLabelPrefix + "instance_status"
+	openstackLabelInstanceFlavor = openstackLabelPrefix + "instance_flavor"
+	openstackLabelPublicIP       = openstackLabelPrefix + "public_ip"
+	openstackLabelPrivateIP      = openstackLabelPrefix + "private_ip"
+	openstackLabelTagPrefix      = openstackLabelPrefix + "tag_"
+)
+
+// InstanceDiscovery discovers OpenStack instances.
+type InstanceDiscovery struct {
+	authOpts *gophercloud.AuthOptions
+	region   string
+	interval time.Duration
+	logger   log.Logger
+	port     int
+}
+
+// NewInstanceDiscovery returns a new instance discovery.
+func NewInstanceDiscovery(opts *gophercloud.AuthOptions,
+	interval time.Duration, port int, region string, l log.Logger) *InstanceDiscovery {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+	return &InstanceDiscovery{authOpts: opts,
+		region: region, interval: interval, port: port, logger: l}
+}
+
+// Run implements the Discoverer interface.
+func (i *InstanceDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+	// Get an initial set right away.
+	tg, err := i.refresh()
+	if err != nil {
+		level.Error(i.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+	} else {
+		select {
+		case ch <- []*targetgroup.Group{tg}:
+		case <-ctx.Done():
+			return
+		}
+	}
+
+	ticker := time.NewTicker(i.interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			tg, err := i.refresh()
+			if err != nil {
+				level.Error(i.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+				continue
+			}
+
+			select {
+			case ch <- []*targetgroup.Group{tg}:
+			case <-ctx.Done():
+				return
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// refresh lists floating IPs and servers from the OpenStack compute API and
+// returns one target group containing one target per discovered instance.
+func (i *InstanceDiscovery) refresh() (*targetgroup.Group, error) {
+	var err error
+	t0 := time.Now()
+	defer func() {
+		refreshDuration.Observe(time.Since(t0).Seconds())
+		if err != nil {
+			refreshFailuresCount.Inc()
+		}
+	}()
+
+	provider, err := openstack.AuthenticatedClient(*i.authOpts)
+	if err != nil {
+		return nil, fmt.Errorf("could not create OpenStack session: %s", err)
+	}
+	client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+		Region: i.region,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not create OpenStack compute session: %s", err)
+	}
+
+	// OpenStack API reference
+	// https://developer.openstack.org/api-ref/compute/#list-floating-ips
+	pagerFIP := floatingips.List(client)
+	floatingIPList := make(map[string][]string)
+	err = pagerFIP.EachPage(func(page pagination.Page) (bool, error) {
+		result, err := floatingips.ExtractFloatingIPs(page)
+		if err != nil {
+			return false, fmt.Errorf("could not extract floatingips: %s", err)
+		}
+		for _, ip := range result {
+			// Skip not associated ips
+			if ip.InstanceID != "" {
+				floatingIPList[ip.InstanceID] = append(floatingIPList[ip.InstanceID], ip.IP)
+			}
+		}
+		return true, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// OpenStack API reference
+	// https://developer.openstack.org/api-ref/compute/#list-servers
+	opts := servers.ListOpts{}
+	pager := servers.List(client, opts)
+	tg := &targetgroup.Group{
+		Source: "OS_" + i.region,
+	}
+	err = pager.EachPage(func(page pagination.Page) (bool, error) {
+		instanceList, err := servers.ExtractServers(page)
+		if err != nil {
+			return false, fmt.Errorf("could not extract instances: %s", err)
+		}
+
+		for _, s := range instanceList {
+			labels := model.LabelSet{
+				openstackLabelInstanceID: model.LabelValue(s.ID),
+			}
+			if len(s.Addresses) == 0 {
+				level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
+				continue
+			}
+			for _, address := range s.Addresses {
+				md, ok := address.([]interface{})
+				if !ok {
+					level.Warn(i.logger).Log("msg", "Invalid type for address, expected array")
+					continue
+				}
+				if len(md) == 0 {
+					level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
+					continue
+				}
+				md1, ok := md[0].(map[string]interface{})
+				if !ok {
+					level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict")
+					continue
+				}
+				addr, ok := md1["addr"].(string)
+				if !ok {
+					level.Warn(i.logger).Log("msg", "Invalid type for address, expected string")
+					continue
+				}
+				labels[openstackLabelPrivateIP] = model.LabelValue(addr)
+				addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port))
+				labels[model.AddressLabel] = model.LabelValue(addr)
+				// Only use first private IP
+				break
+			}
+			if val, ok := floatingIPList[s.ID]; ok && len(val) > 0 {
+				labels[openstackLabelPublicIP] = model.LabelValue(val[0])
+			}
+			labels[openstackLabelInstanceStatus] = model.LabelValue(s.Status)
+			labels[openstackLabelInstanceName] = model.LabelValue(s.Name)
+			id, ok := s.Flavor["id"].(string)
+			if !ok {
+				level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string")
+				continue
+			}
+			labels[openstackLabelInstanceFlavor] = model.LabelValue(id)
+			for k, v := range s.Metadata {
+				name := strutil.SanitizeLabelName(k)
+				labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v)
+			}
+			tg.Targets = append(tg.Targets, labels)
+		}
+		return true, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return tg, nil
+}
diff --git a/src/prometheus/discovery/openstack/instance_test.go b/src/prometheus/discovery/openstack/instance_test.go
new file mode 100644
index 0000000..49727a2
--- /dev/null
+++ b/src/prometheus/discovery/openstack/instance_test.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openstack
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/prometheus/common/model"
+)
+
+type OpenstackSDInstanceTestSuite struct {
+	suite.Suite
+	Mock *SDMock
+}
+
+func (s *OpenstackSDInstanceTestSuite) TearDownSuite() {
+	s.Mock.ShutdownServer()
+}
+
+func (s *OpenstackSDInstanceTestSuite) SetupTest() {
+	s.Mock = NewSDMock(s.T())
+	s.Mock.Setup()
+
+	s.Mock.HandleServerListSuccessfully()
+	s.Mock.HandleFloatingIPListSuccessfully()
+
+	s.Mock.HandleVersionsSuccessfully()
+	s.Mock.HandleAuthSuccessfully()
+}
+
+func TestOpenstackSDInstanceSuite(t *testing.T) {
+	suite.Run(t, new(OpenstackSDInstanceTestSuite))
+}
+
+func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (Discovery, error) {
+	conf := SDConfig{
+		IdentityEndpoint: s.Mock.Endpoint(),
+		Password:         "test",
+		Username:         "test",
+		DomainName:       "12345",
+		Region:           "RegionOne",
+		Role:             "instance",
+	}
+	return NewDiscovery(&conf, nil)
+}
+
+func (s
*OpenstackSDInstanceTestSuite) TestOpenstackSDInstanceRefresh() { + instance, _ := s.openstackAuthSuccess() + tg, err := instance.refresh() + + assert.Nil(s.T(), err) + require.NotNil(s.T(), tg) + require.NotNil(s.T(), tg.Targets) + require.Len(s.T(), tg.Targets, 3) + + assert.Equal(s.T(), tg.Targets[0]["__address__"], model.LabelValue("10.0.0.32:0")) + assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_instance_flavor"], model.LabelValue("1")) + assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_instance_id"], model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5")) + assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_instance_name"], model.LabelValue("herp")) + assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_instance_status"], model.LabelValue("ACTIVE")) + assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_private_ip"], model.LabelValue("10.0.0.32")) + assert.Equal(s.T(), tg.Targets[0]["__meta_openstack_public_ip"], model.LabelValue("10.10.10.2")) + + assert.Equal(s.T(), tg.Targets[1]["__address__"], model.LabelValue("10.0.0.31:0")) + assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_instance_flavor"], model.LabelValue("1")) + assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_instance_id"], model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba")) + assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_instance_name"], model.LabelValue("derp")) + assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_instance_status"], model.LabelValue("ACTIVE")) + assert.Equal(s.T(), tg.Targets[1]["__meta_openstack_private_ip"], model.LabelValue("10.0.0.31")) +} diff --git a/src/prometheus/discovery/openstack/mock.go b/src/prometheus/discovery/openstack/mock.go new file mode 100644 index 0000000..0cd975a --- /dev/null +++ b/src/prometheus/discovery/openstack/mock.go @@ -0,0 +1,565 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openstack
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+// SDMock is the interface for the OpenStack mock
+type SDMock struct {
+	t      *testing.T
+	Server *httptest.Server
+	Mux    *http.ServeMux
+}
+
+// NewSDMock returns a new SDMock.
+func NewSDMock(t *testing.T) *SDMock {
+	return &SDMock{
+		t: t,
+	}
+}
+
+// Endpoint returns the URI to the mock server
+func (m *SDMock) Endpoint() string {
+	return m.Server.URL + "/"
+}
+
+// Setup creates the mock server
+func (m *SDMock) Setup() {
+	m.Mux = http.NewServeMux()
+	m.Server = httptest.NewServer(m.Mux)
+}
+
+// ShutdownServer shuts down the mock server.
+func (m *SDMock) ShutdownServer() {
+	m.Server.Close()
+}
+
+const tokenID = "cbc36478b0bd8e67e89469c7749d4127"
+
+// testMethod fails the test if the request method differs from expected.
+func testMethod(t *testing.T, r *http.Request, expected string) {
+	if expected != r.Method {
+		t.Errorf("Request method = %v, expected %v", r.Method, expected)
+	}
+}
+
+// testHeader fails the test if the named header differs from expected.
+func testHeader(t *testing.T, r *http.Request, header string, expected string) {
+	if actual := r.Header.Get(header); expected != actual {
+		t.Errorf("Header %s = %s, expected %s", header, actual, expected)
+	}
+}
+
+// HandleVersionsSuccessfully mocks version call
+func (m *SDMock) HandleVersionsSuccessfully() {
+	m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, `
+			{
+				"versions": {
+					"values": [
+						{
+							"status": "stable",
+							"id": "v3.0",
+							"links": [
+								{ "href": "%s", "rel": "self" }
+							]
+						},
+						{
+							"status": "stable",
+							"id": "v2.0",
+							"links": [
+								{ "href": "%s", "rel": "self" }
+							]
+						}
+					]
+				}
+			}
+ `, m.Endpoint()+"v3/", m.Endpoint()+"v2.0/") + }) +} + +// HandleAuthSuccessfully mocks auth call +func (m *SDMock) HandleAuthSuccessfully() { + m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Subject-Token", tokenID) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "token": { + "audit_ids": ["VcxU2JYqT8OzfUVvrjEITQ", "qNUTIJntTzO1-XUk5STybw"], + "catalog": [ + { + "endpoints": [ + { + "id": "39dc322ce86c4111b4f06c2eeae0841b", + "interface": "public", + "region": "RegionOne", + "url": "http://localhost:5000" + }, + { + "id": "ec642f27474842e78bf059f6c48f4e99", + "interface": "internal", + "region": "RegionOne", + "url": "http://localhost:5000" + }, + { + "id": "c609fc430175452290b62a4242e8a7e8", + "interface": "admin", + "region": "RegionOne", + "url": "http://localhost:35357" + } + ], + "id": "4363ae44bdf34a3981fde3b823cb9aa2", + "type": "identity", + "name": "keystone" + }, + { + "endpoints": [ + { + "id": "e2ffee808abc4a60916715b1d4b489dd", + "interface": "public", + "region": "RegionOne", + "region_id": "RegionOne", + "url": "%s" + } + ], + "id": "b7f2a5b1a019459cb956e43a8cb41e31", + "type": "compute" + } + + ], + "expires_at": "2013-02-27T18:30:59.999999Z", + "is_domain": false, + "issued_at": "2013-02-27T16:30:59.999999Z", + "methods": [ + "password" + ], + "project": { + "domain": { + "id": "1789d1", + "name": "example.com" + }, + "id": "263fd9", + "name": "project-x" + }, + "roles": [ + { + "id": "76e72a", + "name": "admin" + }, + { + "id": "f4f392", + "name": "member" + } + ], + "user": { + "domain": { + "id": "1789d1", + "name": "example.com" + }, + "id": "0ca8f6", + "name": "Joe", + "password_expires_at": "2016-11-06T15:32:17.000000" + } + } +} + `, m.Endpoint()) + }) +} + +const hypervisorListBody = ` +{ + "hypervisors": [ + { + "status": "enabled", + "service": { + "host": "nc14.cloud.com", + "disabled_reason": null, + "id": 16 + }, + "vcpus_used": 18, + "hypervisor_type": "QEMU", + 
"local_gb_used": 84, + "vcpus": 24, + "hypervisor_hostname": "nc14.cloud.com", + "memory_mb_used": 24064, + "memory_mb": 96484, + "current_workload": 1, + "state": "up", + "host_ip": "172.16.70.14", + "cpu_info": "{\"vendor\": \"Intel\", \"model\": \"IvyBridge\", \"arch\": \"x86_64\", \"features\": [\"pge\", \"avx\", \"clflush\", \"sep\", \"syscall\", \"vme\", \"dtes64\", \"msr\", \"fsgsbase\", \"xsave\", \"vmx\", \"erms\", \"xtpr\", \"cmov\", \"smep\", \"ssse3\", \"est\", \"pat\", \"monitor\", \"smx\", \"pbe\", \"lm\", \"tsc\", \"nx\", \"fxsr\", \"tm\", \"sse4.1\", \"pae\", \"sse4.2\", \"pclmuldq\", \"acpi\", \"tsc-deadline\", \"mmx\", \"osxsave\", \"cx8\", \"mce\", \"de\", \"tm2\", \"ht\", \"dca\", \"lahf_lm\", \"popcnt\", \"mca\", \"pdpe1gb\", \"apic\", \"sse\", \"f16c\", \"pse\", \"ds\", \"invtsc\", \"pni\", \"rdtscp\", \"aes\", \"sse2\", \"ss\", \"ds_cpl\", \"pcid\", \"fpu\", \"cx16\", \"pse36\", \"mtrr\", \"pdcm\", \"rdrand\", \"x2apic\"], \"topology\": {\"cores\": 6, \"cells\": 2, \"threads\": 2, \"sockets\": 1}}", + "running_vms": 10, + "free_disk_gb": 315, + "hypervisor_version": 2003000, + "disk_available_least": 304, + "local_gb": 399, + "free_ram_mb": 72420, + "id": 1 + }, + { + "status": "enabled", + "service": { + "host": "cc13.cloud.com", + "disabled_reason": null, + "id": 17 + }, + "vcpus_used": 1, + "hypervisor_type": "QEMU", + "local_gb_used": 20, + "vcpus": 24, + "hypervisor_hostname": "cc13.cloud.com", + "memory_mb_used": 2560, + "memory_mb": 96484, + "current_workload": 0, + "state": "up", + "host_ip": "172.16.70.13", + "cpu_info": "{\"vendor\": \"Intel\", \"model\": \"IvyBridge\", \"arch\": \"x86_64\", \"features\": [\"pge\", \"avx\", \"clflush\", \"sep\", \"syscall\", \"vme\", \"dtes64\", \"msr\", \"fsgsbase\", \"xsave\", \"vmx\", \"erms\", \"xtpr\", \"cmov\", \"smep\", \"ssse3\", \"est\", \"pat\", \"monitor\", \"smx\", \"pbe\", \"lm\", \"tsc\", \"nx\", \"fxsr\", \"tm\", \"sse4.1\", \"pae\", \"sse4.2\", \"pclmuldq\", \"acpi\", 
\"tsc-deadline\", \"mmx\", \"osxsave\", \"cx8\", \"mce\", \"de\", \"tm2\", \"ht\", \"dca\", \"lahf_lm\", \"popcnt\", \"mca\", \"pdpe1gb\", \"apic\", \"sse\", \"f16c\", \"pse\", \"ds\", \"invtsc\", \"pni\", \"rdtscp\", \"aes\", \"sse2\", \"ss\", \"ds_cpl\", \"pcid\", \"fpu\", \"cx16\", \"pse36\", \"mtrr\", \"pdcm\", \"rdrand\", \"x2apic\"], \"topology\": {\"cores\": 6, \"cells\": 2, \"threads\": 2, \"sockets\": 1}}", + "running_vms": 0, + "free_disk_gb": 379, + "hypervisor_version": 2003000, + "disk_available_least": 384, + "local_gb": 399, + "free_ram_mb": 93924, + "id": 721 + } + ] +}` + +// HandleHypervisorListSuccessfully mocks os-hypervisors detail call +func (m *SDMock) HandleHypervisorListSuccessfully() { + m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, "GET") + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, hypervisorListBody) + }) +} + +const serverListBody = ` +{ + "servers": [ + { + "status": "ERROR", + "updated": "2014-09-25T13:10:10Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": {}, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/af9bcad9-3c87-477d-9347-b291eabf480e", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/af9bcad9-3c87-477d-9347-b291eabf480e", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "error", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000010", + "OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000", + 
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "af9bcad9-3c87-477d-9347-b291eabf480e", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "herp2", + "created": "2014-09-25T13:10:02Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2014-09-25T13:10:10Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", + "version": 4, + "addr": "10.0.0.32", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001e", + "OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": 
"http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "herp", + "created": "2014-09-25T13:10:02Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", 
+ "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "derp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": "", + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "merp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + 
"accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + } + ] +} +` + +// HandleServerListSuccessfully mocks server detail call +func (m *SDMock) HandleServerListSuccessfully() { + m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, "GET") + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, serverListBody) + }) +} + +const listOutput = ` +{ + "floating_ips": [ + { + "fixed_ip": null, + "id": "1", + "instance_id": null, + "ip": "10.10.10.1", + "pool": "nova" + }, + { + "fixed_ip": "166.78.185.201", + "id": "2", + "instance_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "ip": "10.10.10.2", + "pool": "nova" + } + ] +} +` + +// HandleFloatingIPListSuccessfully mocks floating ips call +func (m *SDMock) HandleFloatingIPListSuccessfully() { + m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, "GET") + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, listOutput) + }) +} diff --git a/src/prometheus/discovery/openstack/openstack.go b/src/prometheus/discovery/openstack/openstack.go new file mode 100644 index 0000000..0103edb --- /dev/null +++ b/src/prometheus/discovery/openstack/openstack.go @@ -0,0 +1,153 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package openstack + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/go-kit/kit/log" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/prometheus/client_golang/prometheus" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +var ( + refreshFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_openstack_refresh_failures_total", + Help: "The number of OpenStack-SD scrape failures.", + }) + refreshDuration = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_openstack_refresh_duration_seconds", + Help: "The duration of an OpenStack-SD refresh in seconds.", + }) + // DefaultSDConfig is the default OpenStack SD configuration. + DefaultSDConfig = SDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + } +) + +// SDConfig is the configuration for OpenStack based service discovery. +type SDConfig struct { + IdentityEndpoint string `yaml:"identity_endpoint"` + Username string `yaml:"username"` + UserID string `yaml:"userid"` + Password config_util.Secret `yaml:"password"` + ProjectName string `yaml:"project_name"` + ProjectID string `yaml:"project_id"` + DomainName string `yaml:"domain_name"` + DomainID string `yaml:"domain_id"` + Role Role `yaml:"role"` + Region string `yaml:"region"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + Port int `yaml:"port"` +} + +// OpenStackRole is role of the target in OpenStack. +type Role string + +// The valid options for OpenStackRole. 
+const ( + // OpenStack document reference + // https://docs.openstack.org/nova/pike/admin/arch.html#hypervisors + OpenStackRoleHypervisor Role = "hypervisor" + // OpenStack document reference + // https://docs.openstack.org/horizon/pike/user/launch-instances.html + OpenStackRoleInstance Role = "instance" +) + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { + if err := unmarshal((*string)(c)); err != nil { + return err + } + switch *c { + case OpenStackRoleHypervisor, OpenStackRoleInstance: + return nil + default: + return fmt.Errorf("Unknown OpenStack SD role %q", *c) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if c.Role == "" { + return fmt.Errorf("role missing (one of: instance, hypervisor)") + } + if c.Region == "" { + return fmt.Errorf("Openstack SD configuration requires a region") + } + return nil +} + +func init() { + prometheus.MustRegister(refreshFailuresCount) + prometheus.MustRegister(refreshDuration) +} + +// Discovery periodically performs OpenStack-SD requests. It implements +// the Discoverer interface. +type Discovery interface { + Run(ctx context.Context, ch chan<- []*targetgroup.Group) + refresh() (tg *targetgroup.Group, err error) +} + +// NewDiscovery returns a new OpenStackDiscovery which periodically refreshes its targets. 
+func NewDiscovery(conf *SDConfig, l log.Logger) (Discovery, error) { + var opts gophercloud.AuthOptions + if conf.IdentityEndpoint == "" { + var err error + opts, err = openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + } else { + opts = gophercloud.AuthOptions{ + IdentityEndpoint: conf.IdentityEndpoint, + Username: conf.Username, + UserID: conf.UserID, + Password: string(conf.Password), + TenantName: conf.ProjectName, + TenantID: conf.ProjectID, + DomainName: conf.DomainName, + DomainID: conf.DomainID, + } + } + switch conf.Role { + case OpenStackRoleHypervisor: + hypervisor := NewHypervisorDiscovery(&opts, + time.Duration(conf.RefreshInterval), conf.Port, conf.Region, l) + return hypervisor, nil + case OpenStackRoleInstance: + instance := NewInstanceDiscovery(&opts, + time.Duration(conf.RefreshInterval), conf.Port, conf.Region, l) + return instance, nil + default: + return nil, errors.New("unknown OpenStack discovery role") + } +} diff --git a/src/prometheus/discovery/targetgroup/targetgroup.go b/src/prometheus/discovery/targetgroup/targetgroup.go new file mode 100644 index 0000000..d1dfc73 --- /dev/null +++ b/src/prometheus/discovery/targetgroup/targetgroup.go @@ -0,0 +1,93 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package targetgroup + +import ( + "bytes" + "encoding/json" + + "github.com/prometheus/common/model" +) + +// Group is a set of targets with a common label set(production , test, staging etc.). +type Group struct { + // Targets is a list of targets identified by a label set. Each target is + // uniquely identifiable in the group by its address label. + Targets []model.LabelSet + // Labels is a set of labels that is common across all targets in the group. + Labels model.LabelSet + + // Source is an identifier that describes a group of targets. + Source string +} + +func (tg Group) String() string { + return tg.Source +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error { + g := struct { + Targets []string `yaml:"targets"` + Labels model.LabelSet `yaml:"labels"` + }{} + if err := unmarshal(&g); err != nil { + return err + } + tg.Targets = make([]model.LabelSet, 0, len(g.Targets)) + for _, t := range g.Targets { + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: model.LabelValue(t), + }) + } + tg.Labels = g.Labels + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (tg Group) MarshalYAML() (interface{}, error) { + g := &struct { + Targets []string `yaml:"targets"` + Labels model.LabelSet `yaml:"labels,omitempty"` + }{ + Targets: make([]string, 0, len(tg.Targets)), + Labels: tg.Labels, + } + for _, t := range tg.Targets { + g.Targets = append(g.Targets, string(t[model.AddressLabel])) + } + return g, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (tg *Group) UnmarshalJSON(b []byte) error { + g := struct { + Targets []string `json:"targets"` + Labels model.LabelSet `json:"labels"` + }{} + + dec := json.NewDecoder(bytes.NewReader(b)) + dec.DisallowUnknownFields() + if err := dec.Decode(&g); err != nil { + return err + } + tg.Targets = make([]model.LabelSet, 0, len(g.Targets)) + for _, t := range g.Targets { + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: model.LabelValue(t), + }) + } + tg.Labels = g.Labels + return nil +} diff --git a/src/prometheus/discovery/targetgroup/targetgroup_test.go b/src/prometheus/discovery/targetgroup/targetgroup_test.go new file mode 100644 index 0000000..0087b27 --- /dev/null +++ b/src/prometheus/discovery/targetgroup/targetgroup_test.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package targetgroup + +import ( + "errors" + "testing" + + "github.com/prometheus/prometheus/util/testutil" +) + +func TestTargetGroupStrictJsonUnmarshal(t *testing.T) { + tests := []struct { + json string + expectedReply error + }{ + { + json: ` {"labels": {},"targets": []}`, + expectedReply: nil, + }, + { + json: ` {"label": {},"targets": []}`, + expectedReply: errors.New("json: unknown field \"label\""), + }, + { + json: ` {"labels": {},"target": []}`, + expectedReply: errors.New("json: unknown field \"target\""), + }, + } + tg := Group{} + + for _, test := range tests { + actual := tg.UnmarshalJSON([]byte(test.json)) + testutil.Equals(t, test.expectedReply, actual) + } + +} diff --git a/src/prometheus/discovery/triton/triton.go b/src/prometheus/discovery/triton/triton.go new file mode 100644 index 0000000..09914fa --- /dev/null +++ b/src/prometheus/discovery/triton/triton.go @@ -0,0 +1,226 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package triton + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/mwitkow/go-conntrack" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + + config_util "github.com/prometheus/common/config" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + tritonLabel = model.MetaLabelPrefix + "triton_" + tritonLabelMachineID = tritonLabel + "machine_id" + tritonLabelMachineAlias = tritonLabel + "machine_alias" + tritonLabelMachineBrand = tritonLabel + "machine_brand" + tritonLabelMachineImage = tritonLabel + "machine_image" + tritonLabelServerID = tritonLabel + "server_id" + namespace = "prometheus" +) + +var ( + refreshFailuresCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_triton_refresh_failures_total", + Help: "The number of Triton-SD scrape failures.", + }) + refreshDuration = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_triton_refresh_duration_seconds", + Help: "The duration of a Triton-SD refresh in seconds.", + }) + // DefaultSDConfig is the default Triton SD configuration. + DefaultSDConfig = SDConfig{ + Port: 9163, + RefreshInterval: model.Duration(60 * time.Second), + Version: 1, + } +) + +// SDConfig is the configuration for Triton based service discovery. +type SDConfig struct { + Account string `yaml:"account"` + DNSSuffix string `yaml:"dns_suffix"` + Endpoint string `yaml:"endpoint"` + Port int `yaml:"port"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"` + Version int `yaml:"version"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if c.Account == "" { + return fmt.Errorf("Triton SD configuration requires an account") + } + if c.DNSSuffix == "" { + return fmt.Errorf("Triton SD configuration requires a dns_suffix") + } + if c.Endpoint == "" { + return fmt.Errorf("Triton SD configuration requires an endpoint") + } + if c.RefreshInterval <= 0 { + return fmt.Errorf("Triton SD configuration requires RefreshInterval to be a positive integer") + } + return nil +} + +func init() { + prometheus.MustRegister(refreshFailuresCount) + prometheus.MustRegister(refreshDuration) +} + +// DiscoveryResponse models a JSON response from the Triton discovery. +type DiscoveryResponse struct { + Containers []struct { + ServerUUID string `json:"server_uuid"` + VMAlias string `json:"vm_alias"` + VMBrand string `json:"vm_brand"` + VMImageUUID string `json:"vm_image_uuid"` + VMUUID string `json:"vm_uuid"` + } `json:"containers"` +} + +// Discovery periodically performs Triton-SD requests. It implements +// the Discoverer interface. +type Discovery struct { + client *http.Client + interval time.Duration + logger log.Logger + sdConfig *SDConfig +} + +// New returns a new Discovery which periodically refreshes its targets. 
+func New(logger log.Logger, conf *SDConfig) (*Discovery, error) { + if logger == nil { + logger = log.NewNopLogger() + } + + tls, err := config_util.NewTLSConfig(&conf.TLSConfig) + if err != nil { + return nil, err + } + + transport := &http.Transport{ + TLSClientConfig: tls, + DialContext: conntrack.NewDialContextFunc( + conntrack.DialWithTracing(), + conntrack.DialWithName("triton_sd"), + ), + } + client := &http.Client{Transport: transport} + + return &Discovery{ + client: client, + interval: time.Duration(conf.RefreshInterval), + logger: logger, + sdConfig: conf, + }, nil +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + defer close(ch) + + ticker := time.NewTicker(d.interval) + defer ticker.Stop() + + // Get an initial set right away. + tg, err := d.refresh() + if err != nil { + level.Error(d.logger).Log("msg", "Refreshing targets failed", "err", err) + } else { + ch <- []*targetgroup.Group{tg} + } + + for { + select { + case <-ticker.C: + tg, err := d.refresh() + if err != nil { + level.Error(d.logger).Log("msg", "Refreshing targets failed", "err", err) + } else { + ch <- []*targetgroup.Group{tg} + } + case <-ctx.Done(): + return + } + } +} + +func (d *Discovery) refresh() (tg *targetgroup.Group, err error) { + t0 := time.Now() + defer func() { + refreshDuration.Observe(time.Since(t0).Seconds()) + if err != nil { + refreshFailuresCount.Inc() + } + }() + + var endpoint = fmt.Sprintf("https://%s:%d/v%d/discover", d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version) + tg = &targetgroup.Group{ + Source: endpoint, + } + + resp, err := d.client.Get(endpoint) + if err != nil { + return tg, fmt.Errorf("an error occurred when requesting targets from the discovery endpoint. %s", err) + } + + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tg, fmt.Errorf("an error occurred when reading the response body. 
%s", err) + } + + dr := DiscoveryResponse{} + err = json.Unmarshal(data, &dr) + if err != nil { + return tg, fmt.Errorf("an error occurred unmarshaling the disovery response json. %s", err) + } + + for _, container := range dr.Containers { + labels := model.LabelSet{ + tritonLabelMachineID: model.LabelValue(container.VMUUID), + tritonLabelMachineAlias: model.LabelValue(container.VMAlias), + tritonLabelMachineBrand: model.LabelValue(container.VMBrand), + tritonLabelMachineImage: model.LabelValue(container.VMImageUUID), + tritonLabelServerID: model.LabelValue(container.ServerUUID), + } + addr := fmt.Sprintf("%s.%s:%d", container.VMUUID, d.sdConfig.DNSSuffix, d.sdConfig.Port) + labels[model.AddressLabel] = model.LabelValue(addr) + tg.Targets = append(tg.Targets, labels) + } + + return tg, nil +} diff --git a/src/prometheus/discovery/triton/triton_test.go b/src/prometheus/discovery/triton/triton_test.go new file mode 100644 index 0000000..47f131b --- /dev/null +++ b/src/prometheus/discovery/triton/triton_test.go @@ -0,0 +1,183 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package triton + +import ( + "context" + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +var ( + conf = SDConfig{ + Account: "testAccount", + DNSSuffix: "triton.example.com", + Endpoint: "127.0.0.1", + Port: 443, + Version: 1, + RefreshInterval: 1, + TLSConfig: config.TLSConfig{InsecureSkipVerify: true}, + } + badconf = SDConfig{ + Account: "badTestAccount", + DNSSuffix: "bad.triton.example.com", + Endpoint: "127.0.0.1", + Port: 443, + Version: 1, + RefreshInterval: 1, + TLSConfig: config.TLSConfig{ + InsecureSkipVerify: false, + KeyFile: "shouldnotexist.key", + CAFile: "shouldnotexist.ca", + CertFile: "shouldnotexist.cert", + }, + } +) + +func TestTritonSDNew(t *testing.T) { + td, err := New(nil, &conf) + assert.Nil(t, err) + assert.NotNil(t, td) + assert.NotNil(t, td.client) + assert.NotNil(t, td.interval) + assert.NotNil(t, td.sdConfig) + assert.Equal(t, conf.Account, td.sdConfig.Account) + assert.Equal(t, conf.DNSSuffix, td.sdConfig.DNSSuffix) + assert.Equal(t, conf.Endpoint, td.sdConfig.Endpoint) + assert.Equal(t, conf.Port, td.sdConfig.Port) +} + +func TestTritonSDNewBadConfig(t *testing.T) { + td, err := New(nil, &badconf) + assert.NotNil(t, err) + assert.Nil(t, td) +} + +func TestTritonSDRun(t *testing.T) { + var ( + td, err = New(nil, &conf) + ch = make(chan []*targetgroup.Group) + ctx, cancel = context.WithCancel(context.Background()) + ) + + assert.Nil(t, err) + assert.NotNil(t, td) + + wait := make(chan struct{}) + go func() { + td.Run(ctx, ch) + close(wait) + }() + + select { + case <-time.After(60 * time.Millisecond): + // Expected. 
+ case tgs := <-ch: + t.Fatalf("Unexpected target groups in triton discovery: %s", tgs) + } + + cancel() + <-wait +} + +func TestTritonSDRefreshNoTargets(t *testing.T) { + tgts := testTritonSDRefresh(t, "{\"containers\":[]}") + assert.Nil(t, tgts) +} + +func TestTritonSDRefreshMultipleTargets(t *testing.T) { + var ( + dstr = `{"containers":[ + { + "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131", + "vm_alias":"server01", + "vm_brand":"lx", + "vm_image_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7", + "vm_uuid":"ad466fbf-46a2-4027-9b64-8d3cdb7e9072" + }, + { + "server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6", + "vm_alias":"server02", + "vm_brand":"kvm", + "vm_image_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6", + "vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7" + }] + }` + ) + + tgts := testTritonSDRefresh(t, dstr) + assert.NotNil(t, tgts) + assert.Equal(t, 2, len(tgts)) +} + +func TestTritonSDRefreshNoServer(t *testing.T) { + var ( + td, err = New(nil, &conf) + ) + assert.Nil(t, err) + assert.NotNil(t, td) + + tg, rerr := td.refresh() + assert.NotNil(t, rerr) + assert.Contains(t, rerr.Error(), "an error occurred when requesting targets from the discovery endpoint.") + assert.NotNil(t, tg) + assert.Nil(t, tg.Targets) +} + +func testTritonSDRefresh(t *testing.T, dstr string) []model.LabelSet { + var ( + td, err = New(nil, &conf) + s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, dstr) + })) + ) + + defer s.Close() + + u, uperr := url.Parse(s.URL) + assert.Nil(t, uperr) + assert.NotNil(t, u) + + host, strport, sherr := net.SplitHostPort(u.Host) + assert.Nil(t, sherr) + assert.NotNil(t, host) + assert.NotNil(t, strport) + + port, atoierr := strconv.Atoi(strport) + assert.Nil(t, atoierr) + assert.NotNil(t, port) + + td.sdConfig.Port = port + + assert.Nil(t, err) + assert.NotNil(t, td) + + tg, err := td.refresh() + assert.Nil(t, err) + assert.NotNil(t, tg) + + return tg.Targets +} diff --git 
a/src/prometheus/discovery/zookeeper/zookeeper.go b/src/prometheus/discovery/zookeeper/zookeeper.go new file mode 100644 index 0000000..60a26e8 --- /dev/null +++ b/src/prometheus/discovery/zookeeper/zookeeper.go @@ -0,0 +1,278 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zookeeper + +import ( + "context" + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/prometheus/common/model" + "github.com/samuel/go-zookeeper/zk" + + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" + "github.com/prometheus/prometheus/util/treecache" +) + +var ( + // DefaultServersetSDConfig is the default Serverset SD configuration. + DefaultServersetSDConfig = ServersetSDConfig{ + Timeout: model.Duration(10 * time.Second), + } + // DefaultNerveSDConfig is the default Nerve SD configuration. + DefaultNerveSDConfig = NerveSDConfig{ + Timeout: model.Duration(10 * time.Second), + } +) + +// ServersetSDConfig is the configuration for Twitter serversets in Zookeeper based discovery. +type ServersetSDConfig struct { + Servers []string `yaml:"servers"` + Paths []string `yaml:"paths"` + Timeout model.Duration `yaml:"timeout,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultServersetSDConfig + type plain ServersetSDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if len(c.Servers) == 0 { + return fmt.Errorf("serverset SD config must contain at least one Zookeeper server") + } + if len(c.Paths) == 0 { + return fmt.Errorf("serverset SD config must contain at least one path") + } + for _, path := range c.Paths { + if !strings.HasPrefix(path, "/") { + return fmt.Errorf("serverset SD config paths must begin with '/': %s", path) + } + } + return nil +} + +// NerveSDConfig is the configuration for AirBnB's Nerve in Zookeeper based discovery. +type NerveSDConfig struct { + Servers []string `yaml:"servers"` + Paths []string `yaml:"paths"` + Timeout model.Duration `yaml:"timeout,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultNerveSDConfig + type plain NerveSDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if len(c.Servers) == 0 { + return fmt.Errorf("nerve SD config must contain at least one Zookeeper server") + } + if len(c.Paths) == 0 { + return fmt.Errorf("nerve SD config must contain at least one path") + } + for _, path := range c.Paths { + if !strings.HasPrefix(path, "/") { + return fmt.Errorf("nerve SD config paths must begin with '/': %s", path) + } + } + return nil +} + +// Discovery implements the Discoverer interface for discovering +// targets from Zookeeper. +type Discovery struct { + conn *zk.Conn + + sources map[string]*targetgroup.Group + + updates chan treecache.ZookeeperTreeCacheEvent + treeCaches []*treecache.ZookeeperTreeCache + + parse func(data []byte, path string) (model.LabelSet, error) + logger log.Logger +} + +// NewNerveDiscovery returns a new Discovery for the given Nerve config. 
+func NewNerveDiscovery(conf *NerveSDConfig, logger log.Logger) *Discovery { + return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember) +} + +// NewServersetDiscovery returns a new Discovery for the given serverset config. +func NewServersetDiscovery(conf *ServersetSDConfig, logger log.Logger) *Discovery { + return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember) +} + +// NewDiscovery returns a new discovery along Zookeeper parses with +// the given parse function. +func NewDiscovery( + srvs []string, + timeout time.Duration, + paths []string, + logger log.Logger, + pf func(data []byte, path string) (model.LabelSet, error), +) *Discovery { + if logger == nil { + logger = log.NewNopLogger() + } + + conn, _, err := zk.Connect(srvs, timeout) + conn.SetLogger(treecache.NewZookeeperLogger(logger)) + if err != nil { + return nil + } + updates := make(chan treecache.ZookeeperTreeCacheEvent) + sd := &Discovery{ + conn: conn, + updates: updates, + sources: map[string]*targetgroup.Group{}, + parse: pf, + logger: logger, + } + for _, path := range paths { + sd.treeCaches = append(sd.treeCaches, treecache.NewZookeeperTreeCache(conn, path, updates, logger)) + } + return sd +} + +// Run implements the Discoverer interface. +func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + defer func() { + for _, tc := range d.treeCaches { + tc.Stop() + } + // Drain event channel in case the treecache leaks goroutines otherwise. 
+ for range d.updates { + } + d.conn.Close() + }() + + for { + select { + case <-ctx.Done(): + return + case event := <-d.updates: + tg := &targetgroup.Group{ + Source: event.Path, + } + if event.Data != nil { + labelSet, err := d.parse(*event.Data, event.Path) + if err == nil { + tg.Targets = []model.LabelSet{labelSet} + d.sources[event.Path] = tg + } else { + delete(d.sources, event.Path) + } + } else { + delete(d.sources, event.Path) + } + select { + case <-ctx.Done(): + return + case ch <- []*targetgroup.Group{tg}: + } + } + } +} + +const ( + serversetLabelPrefix = model.MetaLabelPrefix + "serverset_" + serversetStatusLabel = serversetLabelPrefix + "status" + serversetPathLabel = serversetLabelPrefix + "path" + serversetEndpointLabelPrefix = serversetLabelPrefix + "endpoint" + serversetShardLabel = serversetLabelPrefix + "shard" +) + +type serversetMember struct { + ServiceEndpoint serversetEndpoint + AdditionalEndpoints map[string]serversetEndpoint + Status string `json:"status"` + Shard int `json:"shard"` +} + +type serversetEndpoint struct { + Host string + Port int +} + +func parseServersetMember(data []byte, path string) (model.LabelSet, error) { + member := serversetMember{} + + if err := json.Unmarshal(data, &member); err != nil { + return nil, fmt.Errorf("error unmarshaling serverset member %q: %s", path, err) + } + + labels := model.LabelSet{} + labels[serversetPathLabel] = model.LabelValue(path) + labels[model.AddressLabel] = model.LabelValue( + net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port))) + + labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host) + labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port)) + + for name, endpoint := range member.AdditionalEndpoints { + cleanName := model.LabelName(strutil.SanitizeLabelName(name)) + labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue( + 
endpoint.Host) + labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue( + fmt.Sprintf("%d", endpoint.Port)) + + } + + labels[serversetStatusLabel] = model.LabelValue(member.Status) + labels[serversetShardLabel] = model.LabelValue(strconv.Itoa(member.Shard)) + + return labels, nil +} + +const ( + nerveLabelPrefix = model.MetaLabelPrefix + "nerve_" + nervePathLabel = nerveLabelPrefix + "path" + nerveEndpointLabelPrefix = nerveLabelPrefix + "endpoint" +) + +type nerveMember struct { + Host string `json:"host"` + Port int `json:"port"` + Name string `json:"name"` +} + +func parseNerveMember(data []byte, path string) (model.LabelSet, error) { + member := nerveMember{} + err := json.Unmarshal(data, &member) + if err != nil { + return nil, fmt.Errorf("error unmarshaling nerve member %q: %s", path, err) + } + + labels := model.LabelSet{} + labels[nervePathLabel] = model.LabelValue(path) + labels[model.AddressLabel] = model.LabelValue( + net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port))) + + labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host) + labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port)) + labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name) + + return labels, nil +} diff --git a/src/prometheus/docs/configuration/alerting_rules.md b/src/prometheus/docs/configuration/alerting_rules.md new file mode 100644 index 0000000..cc674a0 --- /dev/null +++ b/src/prometheus/docs/configuration/alerting_rules.md @@ -0,0 +1,103 @@ +--- +title: Alerting rules +sort_rank: 3 +--- + +# Alerting rules + +Alerting rules allow you to define alert conditions based on Prometheus +expression language expressions and to send notifications about firing alerts +to an external service. Whenever the alert expression results in one or more +vector elements at a given point in time, the alert counts as active for these +elements' label sets. 
+ +### Defining alerting rules + +Alerting rules are configured in Prometheus in the same way as [recording +rules](recording_rules.md). + +An example rules file with an alert would be: + +```yaml +groups: +- name: example + rules: + - alert: HighErrorRate + expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 + for: 10m + labels: + severity: page + annotations: + summary: High request latency +``` + +The optional `for` clause causes Prometheus to wait for a certain duration +between first encountering a new expression output vector element and counting an alert as firing for this element. In this case, Prometheus will check that the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state. + +The `labels` clause allows specifying a set of additional labels to be attached +to the alert. Any existing conflicting labels will be overwritten. The label +values can be templated. + +The `annotations` clause specifies a set of informational labels that can be used to store longer additional information such as alert descriptions or runbook links. The annotation values can be templated. + +#### Templating + +Label and annotation values can be templated using [console templates](https://prometheus.io/docs/visualization/consoles). +The `$labels` variable holds the label key/value pairs of an alert instance +and `$value` holds the evaluated value of an alert instance. + + # To insert a firing element's label values: + {{ $labels. }} + # To insert the numeric expression value of the firing element: + {{ $value }} + +Examples: + +```yaml +groups: +- name: example + rules: + + # Alert for any instance that is unreachable for >5 minutes. 
+ - alert: InstanceDown + expr: up == 0 + for: 5m + labels: + severity: page + annotations: + summary: "Instance {{ $labels.instance }} down" + description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes." + + # Alert for any instance that has a median request latency >1s. + - alert: APIHighRequestLatency + expr: api_http_request_latencies_second{quantile="0.5"} > 1 + for: 10m + annotations: + summary: "High request latency on {{ $labels.instance }}" + description: "{{ $labels.instance }} has a median request latency above 1s (current value: {{ $value }}s)" +``` + +### Inspecting alerts during runtime + +To manually inspect which alerts are active (pending or firing), navigate to +the "Alerts" tab of your Prometheus instance. This will show you the exact +label sets for which each defined alert is currently active. + +For pending and firing alerts, Prometheus also stores synthetic time series of +the form `ALERTS{alertname="<alert name>", alertstate="pending|firing", <additional alert labels>}`. +The sample value is set to `1` as long as the alert is in the indicated active +(pending or firing) state, and the series is marked stale when this is no +longer the case. + +### Sending alert notifications + +Prometheus's alerting rules are good at figuring what is broken *right now*, but +they are not a fully-fledged notification solution. Another layer is needed to +add summarization, notification rate limiting, silencing and alert dependencies +on top of the simple alert definitions. In Prometheus's ecosystem, the +[Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) takes on this +role. Thus, Prometheus may be configured to periodically send information about +alert states to an Alertmanager instance, which then takes care of dispatching +the right notifications. +Prometheus can be [configured](configuration.md) to automatically discover available +Alertmanager instances through its service discovery integrations.
diff --git a/src/prometheus/docs/configuration/configuration.md b/src/prometheus/docs/configuration/configuration.md new file mode 100644 index 0000000..7aeba68 --- /dev/null +++ b/src/prometheus/docs/configuration/configuration.md @@ -0,0 +1,1267 @@ +--- +title: Configuration +sort_rank: 1 +--- + +# Configuration + +Prometheus is configured via command-line flags and a configuration file. While +the command-line flags configure immutable system parameters (such as storage +locations, amount of data to keep on disk and in memory, etc.), the +configuration file defines everything related to scraping [jobs and their +instances](https://prometheus.io/docs/concepts/jobs_instances/), as well as +which [rule files to load](recording_rules.md#configuring-rules). + +To view all available command-line flags, run `./prometheus -h`. + +Prometheus can reload its configuration at runtime. If the new configuration +is not well-formed, the changes will not be applied. +A configuration reload is triggered by sending a `SIGHUP` to the Prometheus process or +sending an HTTP POST request to the `/-/reload` endpoint (when the `--web.enable-lifecycle` flag is enabled). +This will also reload any configured rule files. + +## Configuration file + +To specify which configuration file to load, use the `--config.file` flag. + +The file is written in [YAML format](http://en.wikipedia.org/wiki/YAML), +defined by the scheme described below. +Brackets indicate that a parameter is optional. For non-list parameters the +value is set to the specified default.
+ +Generic placeholders are defined as follows: + +* `<boolean>`: a boolean that can take the values `true` or `false` +* `<duration>`: a duration matching the regular expression `[0-9]+(ms|[smhdwy])` +* `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*` +* `<labelvalue>`: a string of unicode characters +* `<filename>`: a valid path in the current working directory +* `<host>`: a valid string consisting of a hostname or IP followed by an optional port number +* `<path>`: a valid URL path +* `<scheme>`: a string that can take the values `http` or `https` +* `<string>`: a regular string +* `<secret>`: a regular string that is a secret, such as a password +* `<tmpl_string>`: a string which is template-expanded before usage + +The other placeholders are specified separately. + +A valid example file can be found [here](/config/testdata/conf.good.yml). + +The global configuration specifies parameters that are valid in all other configuration +contexts. They also serve as defaults for other configuration sections. + +```yaml +global: + # How frequently to scrape targets by default. + [ scrape_interval: <duration> | default = 1m ] + + # How long until a scrape request times out. + [ scrape_timeout: <duration> | default = 10s ] + + # How frequently to evaluate rules. + [ evaluation_interval: <duration> | default = 1m ] + + # The labels to add to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + [ <labelname>: <labelvalue> ... ] + +# Rule files specify a list of globs. Rules and alerts are read from +# all matching files. +rule_files: + [ - <filepath_glob> ... ] + +# A list of scrape configurations. +scrape_configs: + [ - <scrape_config> ... ] + +# Alerting specifies settings related to the Alertmanager. +alerting: + alert_relabel_configs: + [ - <relabel_config> ... ] + alertmanagers: + [ - <alertmanager_config> ... ] + +# Settings related to the remote write feature. +remote_write: + [ - <remote_write> ... ] + +# Settings related to the remote read feature. +remote_read: + [ - <remote_read> ...
] +``` + +### `` + +A `scrape_config` section specifies a set of targets and parameters describing how +to scrape them. In the general case, one scrape configuration specifies a single +job. In advanced configurations, this may change. + +Targets may be statically configured via the `static_configs` parameter or +dynamically discovered using one of the supported service-discovery mechanisms. + +Additionally, `relabel_configs` allow advanced modifications to any +target and its labels before scraping. + +```yaml +# The job name assigned to scraped metrics by default. +job_name: + +# How frequently to scrape targets from this job. +[ scrape_interval: | default = ] + +# Per-scrape timeout when scraping this job. +[ scrape_timeout: | default = ] + +# The HTTP resource path on which to fetch metrics from targets. +[ metrics_path: | default = /metrics ] + +# honor_labels controls how Prometheus handles conflicts between labels that are +# already present in scraped data and labels that Prometheus would attach +# server-side ("job" and "instance" labels, manually configured target +# labels, and labels generated by service discovery implementations). +# +# If honor_labels is set to "true", label conflicts are resolved by keeping label +# values from the scraped data and ignoring the conflicting server-side labels. +# +# If honor_labels is set to "false", label conflicts are resolved by renaming +# conflicting labels in the scraped data to "exported_" (for +# example "exported_instance", "exported_job") and then attaching server-side +# labels. This is useful for use cases such as federation, where all labels +# specified in the target should be preserved. +# +# Note that any globally configured "external_labels" are unaffected by this +# setting. In communication with external systems, they are always applied only +# when a time series does not have a given label yet and are ignored otherwise. 
+[ honor_labels: | default = false ] + +# Configures the protocol scheme used for requests. +[ scheme: | default = http ] + +# Optional HTTP URL parameters. +params: + [ : [, ...] ] + +# Sets the `Authorization` header on every scrape request with the +# configured username and password. +# password and password_file are mutually exclusive. +basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + +# Sets the `Authorization` header on every scrape request with +# the configured bearer token. It is mutually exclusive with `bearer_token_file`. +[ bearer_token: ] + +# Sets the `Authorization` header on every scrape request with the bearer token +# read from the configured file. It is mutually exclusive with `bearer_token`. +[ bearer_token_file: /path/to/bearer/token/file ] + +# Configures the scrape request's TLS settings. +tls_config: + [ ] + +# Optional proxy URL. +[ proxy_url: ] + +# List of Azure service discovery configurations. +azure_sd_configs: + [ - ... ] + +# List of Consul service discovery configurations. +consul_sd_configs: + [ - ... ] + +# List of DNS service discovery configurations. +dns_sd_configs: + [ - ... ] + +# List of EC2 service discovery configurations. +ec2_sd_configs: + [ - ... ] + +# List of OpenStack service discovery configurations. +openstack_sd_configs: + [ - ... ] + +# List of file service discovery configurations. +file_sd_configs: + [ - ... ] + +# List of GCE service discovery configurations. +gce_sd_configs: + [ - ... ] + +# List of Kubernetes service discovery configurations. +kubernetes_sd_configs: + [ - ... ] + +# List of Marathon service discovery configurations. +marathon_sd_configs: + [ - ... ] + +# List of AirBnB's Nerve service discovery configurations. +nerve_sd_configs: + [ - ... ] + +# List of Zookeeper Serverset service discovery configurations. +serverset_sd_configs: + [ - ... ] + +# List of Triton service discovery configurations. +triton_sd_configs: + [ - ... 
] + +# List of labeled statically configured targets for this job. +static_configs: + [ - ... ] + +# List of target relabel configurations. +relabel_configs: + [ - ... ] + +# List of metric relabel configurations. +metric_relabel_configs: + [ - ... ] + +# Per-scrape limit on number of scraped samples that will be accepted. +# If more than this number of samples are present after metric relabelling +# the entire scrape will be treated as failed. 0 means no limit. +[ sample_limit: | default = 0 ] +``` + +Where `` must be unique across all scrape configurations. + +### `` + +A `tls_config` allows configuring TLS connections. + +```yaml +# CA certificate to validate API server certificate with. +[ ca_file: ] + +# Certificate and key files for client cert authentication to the server. +[ cert_file: ] +[ key_file: ] + +# ServerName extension to indicate the name of the server. +# http://tools.ietf.org/html/rfc4366#section-3.1 +[ server_name: ] + +# Disable validation of the server certificate. +[ insecure_skip_verify: ] +``` + +### `` + +Azure SD configurations allow retrieving scrape targets from Azure VMs. + +The following meta labels are available on targets during relabeling: + +* `__meta_azure_machine_id`: the machine ID +* `__meta_azure_machine_location`: the location the machine runs in +* `__meta_azure_machine_name`: the machine name +* `__meta_azure_machine_os_type`: the machine operating system +* `__meta_azure_machine_private_ip`: the machine's private IP +* `__meta_azure_machine_resource_group`: the machine's resource group +* `__meta_azure_machine_tag_`: each tag value of the machine + +See below for the configuration options for Azure discovery: + +```yaml +# The information to access the Azure API. +# The subscription ID. +subscription_id: +# The tenant ID. +tenant_id: +# The client ID. +client_id: +# The client secret. +client_secret: + +# Refresh interval to re-read the instance list. 
+[ refresh_interval: | default = 300s ] + +# The port to scrape metrics from. If using the public IP address, this must +# instead be specified in the relabeling rule. +[ port: | default = 80 ] +``` + +### `` + +Consul SD configurations allow retrieving scrape targets from [Consul's](https://www.consul.io) +Catalog API. + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_consul_address`: the address of the target +* `__meta_consul_dc`: the datacenter name for the target +* `__meta_consul_metadata_`: each node metadata key value of the target +* `__meta_consul_node`: the node name defined for the target +* `__meta_consul_service_address`: the service address of the target +* `__meta_consul_service_id`: the service ID of the target +* `__meta_consul_service_port`: the service port of the target +* `__meta_consul_service`: the name of the service the target belongs to +* `__meta_consul_tags`: the list of tags of the target joined by the tag separator + +```yaml +# The information to access the Consul API. It is to be defined +# as the Consul documentation requires. +[ server: | default = "localhost:8500" ] +[ token: ] +[ datacenter: ] +[ scheme: | default = "http" ] +[ username: ] +[ password: ] + +tls_config: + [ ] + +# A list of services for which targets are retrieved. If omitted, all services +# are scraped. +services: + [ - ] + +# See https://www.consul.io/api/catalog.html#list-nodes-for-service to know more +# about the possible filters that can be used. + +# An optional tag used to filter nodes for a given service. +[ tag: ] + +# Node metadata used to filter nodes for a given service. +[ node_meta: + [ : ... ] ] + +# The string by which Consul tags are joined into the tag label. +[ tag_separator: | default = , ] + +# Allow stale Consul results (see https://www.consul.io/api/index.html#consistency-modes). Will reduce load on Consul. +[ allow_stale: ] + +# The time after which the provided names are refreshed. 
+# On large setup it might be a good idea to increase this value because the catalog will change all the time. +[ refresh_interval: | default = 30s ] +``` + +Note that the IP number and port used to scrape the targets is assembled as +`<__meta_consul_address>:<__meta_consul_service_port>`. However, in some +Consul setups, the relevant address is in `__meta_consul_service_address`. +In those cases, you can use the [relabel](#relabel_config) +feature to replace the special `__address__` label. + +The [relabeling phase](#relabel_config) is the preferred and more powerful +way to filter services or nodes for a service based on arbitrary labels. For +users with thousands of services it can be more efficient to use the Consul API +directly which has basic support for filtering nodes (currently by node +metadata and a single tag). + +### `` + +A DNS-based service discovery configuration allows specifying a set of DNS +domain names which are periodically queried to discover a list of targets. The +DNS servers to be contacted are read from `/etc/resolv.conf`. + +This service discovery method only supports basic DNS A, AAAA and SRV record +queries, but not the advanced DNS-SD approach specified in +[RFC6763](https://tools.ietf.org/html/rfc6763). + +During the [relabeling phase](#relabel_config), the meta label +`__meta_dns_name` is available on each target and is set to the +record name that produced the discovered target. + +```yaml +# A list of DNS domain names to be queried. +names: + [ - ] + +# The type of DNS query to perform. +[ type: | default = 'SRV' ] + +# The port number used if the query type is not SRV. +[ port: ] + +# The time after which the provided names are refreshed. +[ refresh_interval: | default = 30s ] +``` + +Where `` is a valid DNS domain name. +Where `` is `SRV`, `A`, or `AAAA`. + +### `` + +EC2 SD configurations allow retrieving scrape targets from AWS EC2 +instances. 
The private IP address is used by default, but may be changed to +the public IP address with relabeling. + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_ec2_availability_zone`: the availability zone in which the instance is running +* `__meta_ec2_instance_id`: the EC2 instance ID +* `__meta_ec2_instance_state`: the state of the EC2 instance +* `__meta_ec2_instance_type`: the type of the EC2 instance +* `__meta_ec2_private_ip`: the private IP address of the instance, if present +* `__meta_ec2_public_dns_name`: the public DNS name of the instance, if available +* `__meta_ec2_public_ip`: the public IP address of the instance, if available +* `__meta_ec2_subnet_id`: comma separated list of subnets IDs in which the instance is running, if available +* `__meta_ec2_tag_`: each tag value of the instance +* `__meta_ec2_vpc_id`: the ID of the VPC in which the instance is running, if available + +See below for the configuration options for EC2 discovery: + +```yaml +# The information to access the EC2 API. + +# The AWS Region. +region: + +# The AWS API keys. If blank, the environment variables `AWS_ACCESS_KEY_ID` +# and `AWS_SECRET_ACCESS_KEY` are used. +[ access_key: ] +[ secret_key: ] +# Named AWS profile used to connect to the API. +[ profile: ] + +# AWS Role ARN, an alternative to using AWS API keys. +[ role_arn: ] + +# Refresh interval to re-read the instance list. +[ refresh_interval: | default = 60s ] + +# The port to scrape metrics from. If using the public IP address, this must +# instead be specified in the relabeling rule. +[ port: | default = 80 ] + +# Filters can be used optionally to filter the instance list by other criteria. +# Available filter criteria can be found here: +# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html +# Filter API documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Filter.html +filters: + [ - name: + values: , [...] 
] +``` + +The [relabeling phase](#relabel_config) is the preferred and more powerful +way to filter targets based on arbitrary labels. For users with thousands of +instances it can be more efficient to use the EC2 API directly which has +support for filtering instances. + +### `` + +OpenStack SD configurations allow retrieving scrape targets from OpenStack Nova +instances. + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_openstack_instance_id`: the OpenStack instance ID. +* `__meta_openstack_instance_name`: the OpenStack instance name. +* `__meta_openstack_instance_status`: the status of the OpenStack instance. +* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. +* `__meta_openstack_public_ip`: the public IP of the OpenStack instance. +* `__meta_openstack_private_ip`: the private IP of the OpenStack instance. +* `__meta_openstack_tag_`: each tag value of the instance. + +#### `instance` + +The `instance` role discovers one target per Nova instance. The target +address defaults to the first private IP address of the instance. + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_openstack_instance_id`: the OpenStack instance ID. +* `__meta_openstack_instance_name`: the OpenStack instance name. +* `__meta_openstack_instance_status`: the status of the OpenStack instance. +* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. +* `__meta_openstack_public_ip`: the public IP of the OpenStack instance. +* `__meta_openstack_private_ip`: the private IP of the OpenStack instance. +* `__meta_openstack_tag_`: each tag value of the instance. + +See below for the configuration options for OpenStack discovery: + +```yaml +# The information to access the OpenStack API. + +# The OpenStack role of entities that should be discovered. +role: + +# The OpenStack Region. 
+region: + +# identity_endpoint specifies the HTTP endpoint that is required to work with +# the Identity API of the appropriate version. While it's ultimately needed by +# all of the identity services, it will often be populated by a provider-level +# function. +[ identity_endpoint: ] + +# username is required if using Identity V2 API. Consult with your provider's +# control panel to discover your account's username. In Identity V3, either +# userid or a combination of username and domain_id or domain_name are needed. +[ username: ] +[ userid: ] + +# password for the Identity V2 and V3 APIs. Consult with your provider's +# control panel to discover your account's preferred method of authentication. +[ password: ] + +# At most one of domain_id and domain_name must be provided if using username +# with Identity V3. Otherwise, either are optional. +[ domain_name: ] +[ domain_id: ] + +# The project_id and project_name fields are optional for the Identity V2 API. +# Some providers allow you to specify a project_name instead of the project_id. +# Some require both. Your provider's authentication policies will determine +# how these fields influence authentication. +[ project_name: ] +[ project_id: ] + +# Refresh interval to re-read the instance list. +[ refresh_interval: | default = 60s ] + +# The port to scrape metrics from. If using the public IP address, this must +# instead be specified in the relabeling rule. +[ port: | default = 80 ] +``` + +### `` + +File-based service discovery provides a more generic way to configure static targets +and serves as an interface to plug in custom service discovery mechanisms. + +It reads a set of files containing a list of zero or more +``s. Changes to all defined files are detected via disk watches +and applied immediately. Files may be provided in YAML or JSON format. Only +changes resulting in well-formed target groups are applied. 
+ +The JSON file must contain a list of static configs, using this format: + +```yaml +[ + { + "targets": [ "", ... ], + "labels": { + "": "", ... + } + }, + ... +] +``` + +As a fallback, the file contents are also re-read periodically at the specified +refresh interval. + +Each target has a meta label `__meta_filepath` during the +[relabeling phase](#relabel_config). Its value is set to the +filepath from which the target was extracted. + +There is a list of +[integrations](https://prometheus.io/docs/operating/integrations/#file-service-discovery) with this +discovery mechanism. + +```yaml +# Patterns for files from which target groups are extracted. +files: + [ - ... ] + +# Refresh interval to re-read the files. +[ refresh_interval: | default = 5m ] +``` + +Where `` may be a path ending in `.json`, `.yml` or `.yaml`. The last path segment +may contain a single `*` that matches any character sequence, e.g. `my/path/tg_*.json`. + +### `` + +[GCE](https://cloud.google.com/compute/) SD configurations allow retrieving scrape targets from GCP GCE instances. +The private IP address is used by default, but may be changed to the public IP +address with relabeling. 
+ +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_gce_instance_name`: the name of the instance +* `__meta_gce_label_`: each GCE label of the instance +* `__meta_gce_machine_type`: full or partial URL of the machine type of the instance +* `__meta_gce_metadata_`: each metadata item of the instance +* `__meta_gce_network`: the network URL of the instance +* `__meta_gce_private_ip`: the private IP address of the instance +* `__meta_gce_project`: the GCP project in which the instance is running +* `__meta_gce_public_ip`: the public IP address of the instance, if present +* `__meta_gce_subnetwork`: the subnetwork URL of the instance +* `__meta_gce_tags`: comma separated list of instance tags +* `__meta_gce_zone`: the GCE zone URL in which the instance is running + +See below for the configuration options for GCE discovery: + +```yaml +# The information to access the GCE API. + +# The GCP Project +project: + +# The zone of the scrape targets. If you need multiple zones use multiple +# gce_sd_configs. +zone: + +# Filter can be used optionally to filter the instance list by other criteria +# Syntax of this filter string is described here in the filter query parameter section: +# https://cloud.google.com/compute/docs/reference/latest/instances/list +[ filter: ] + +# Refresh interval to re-read the instance list +[ refresh_interval: | default = 60s ] + +# The port to scrape metrics from. If using the public IP address, this must +# instead be specified in the relabeling rule. +[ port: | default = 80 ] + +# The tag separator is used to separate the tags on concatenation +[ tag_separator: | default = , ] +``` + +Credentials are discovered by the Google Cloud SDK default client by looking +in the following places, preferring the first location found: + +1. a JSON file specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable +2. 
a JSON file in the well-known path `$HOME/.config/gcloud/application_default_credentials.json` +3. fetched from the GCE metadata server + +If Prometheus is running within GCE, the service account associated with the +instance it is running on should have at least read-only permissions to the +compute resources. If running outside of GCE make sure to create an appropriate +service account and place the credential file in one of the expected locations. + +### `` + +Kubernetes SD configurations allow retrieving scrape targets from +[Kubernetes'](http://kubernetes.io/) REST API and always staying synchronized with +the cluster state. + +One of the following `role` types can be configured to discover targets: + +#### `node` + +The `node` role discovers one target per cluster node with the address defaulting +to the Kubelet's HTTP port. +The target address defaults to the first existing address of the Kubernetes +node object in the address type order of `NodeInternalIP`, `NodeExternalIP`, +`NodeLegacyHostIP`, and `NodeHostName`. + +Available meta labels: + +* `__meta_kubernetes_node_name`: The name of the node object. +* `__meta_kubernetes_node_label_`: Each label from the node object. +* `__meta_kubernetes_node_annotation_`: Each annotation from the node object. +* `__meta_kubernetes_node_address_`: The first address for each node address type, if it exists. + +In addition, the `instance` label for the node will be set to the node name +as retrieved from the API server. + +#### `service` + +The `service` role discovers a target for each service port for each service. +This is generally useful for blackbox monitoring of a service. +The address will be set to the Kubernetes DNS name of the service and respective +service port. + +Available meta labels: + +* `__meta_kubernetes_namespace`: The namespace of the service object. +* `__meta_kubernetes_service_name`: The name of the service object. +* `__meta_kubernetes_service_label_`: The label of the service object. 
+* `__meta_kubernetes_service_annotation_`: The annotation of the service object. +* `__meta_kubernetes_service_port_name`: Name of the service port for the target. +* `__meta_kubernetes_service_port_number`: Number of the service port for the target. +* `__meta_kubernetes_service_port_protocol`: Protocol of the service port for the target. + +#### `pod` + +The `pod` role discovers all pods and exposes their containers as targets. For each declared +port of a container, a single target is generated. If a container has no specified ports, +a port-free target per container is created for manually adding a port via relabeling. + +Available meta labels: + +* `__meta_kubernetes_namespace`: The namespace of the pod object. +* `__meta_kubernetes_pod_name`: The name of the pod object. +* `__meta_kubernetes_pod_ip`: The pod IP of the pod object. +* `__meta_kubernetes_pod_label_`: The label of the pod object. +* `__meta_kubernetes_pod_annotation_`: The annotation of the pod object. +* `__meta_kubernetes_pod_container_name`: Name of the container the target address points to. +* `__meta_kubernetes_pod_container_port_name`: Name of the container port. +* `__meta_kubernetes_pod_container_port_number`: Number of the container port. +* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container port. +* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready state. +* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled onto. +* `__meta_kubernetes_pod_host_ip`: The current host IP of the pod object. +* `__meta_kubernetes_pod_uid`: The UID of the pod object. +* `__meta_kubernetes_pod_controller_kind`: Object kind of the pod controller. +* `__meta_kubernetes_pod_controller_name`: Name of the pod controller. + +#### `endpoints` + +The `endpoints` role discovers targets from listed endpoints of a service. For each endpoint +address one target is discovered per port. 
If the endpoint is backed by a pod, all +additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well. + +Available meta labels: + +* `__meta_kubernetes_namespace`: The namespace of the endpoints object. +* `__meta_kubernetes_endpoints_name`: The names of the endpoints object. +* For all targets discovered directly from the endpoints list (those not additionally inferred + from underlying pods), the following labels are attached: + * `__meta_kubernetes_endpoint_ready`: Set to `true` or `false` for the endpoint's ready state. + * `__meta_kubernetes_endpoint_port_name`: Name of the endpoint port. + * `__meta_kubernetes_endpoint_port_protocol`: Protocol of the endpoint port. + * `__meta_kubernetes_endpoint_address_target_kind`: Kind of the endpoint address target. + * `__meta_kubernetes_endpoint_address_target_name`: Name of the endpoint address target. +* If the endpoints belong to a service, all labels of the `role: service` discovery are attached. +* For all targets backed by a pod, all labels of the `role: pod` discovery are attached. + +#### `ingress` + +The `ingress` role discovers a target for each path of each ingress. +This is generally useful for blackbox monitoring of an ingress. +The address will be set to the host specified in the ingress spec. + +Available meta labels: + +* `__meta_kubernetes_namespace`: The namespace of the ingress object. +* `__meta_kubernetes_ingress_name`: The name of the ingress object. +* `__meta_kubernetes_ingress_label_`: The label of the ingress object. +* `__meta_kubernetes_ingress_annotation_`: The annotation of the ingress object. +* `__meta_kubernetes_ingress_scheme`: Protocol scheme of ingress, `https` if TLS + config is set. Defaults to `http`. +* `__meta_kubernetes_ingress_path`: Path from ingress spec. Defaults to `/`. + +See below for the configuration options for Kubernetes discovery: + +```yaml +# The information to access the Kubernetes API. + +# The API server addresses. 
If left empty, Prometheus is assumed to run inside +# of the cluster and will discover API servers automatically and use the pod's +# CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. +[ api_server: ] + +# The Kubernetes role of entities that should be discovered. +role: + +# Optional authentication information used to authenticate to the API server. +# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are +# mutually exclusive. +# password and password_file are mutually exclusive. + +# Optional HTTP basic authentication information. +basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + +# Optional bearer token authentication information. +[ bearer_token: ] + +# Optional bearer token file authentication information. +[ bearer_token_file: ] + +# TLS configuration. +tls_config: + [ ] + +# Optional namespace discovery. If omitted, all namespaces are used. +namespaces: + names: + [ - ] +``` + +Where `` must be `endpoints`, `service`, `pod`, `node`, or +`ingress`. + +See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml) +for a detailed example of configuring Prometheus for Kubernetes. + +You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator), +which automates the Prometheus setup on top of Kubernetes. + +### `` + +Marathon SD configurations allow retrieving scrape targets using the +[Marathon](https://mesosphere.github.io/marathon/) REST API. Prometheus +will periodically check the REST endpoint for currently running tasks and +create a target group for every app that has at least one healthy task. 
+ +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_marathon_app`: the name of the app (with slashes replaced by dashes) +* `__meta_marathon_image`: the name of the Docker image used (if available) +* `__meta_marathon_task`: the ID of the Mesos task +* `__meta_marathon_app_label_`: any Marathon labels attached to the app +* `__meta_marathon_port_definition_label_`: the port definition labels +* `__meta_marathon_port_mapping_label_`: the port mapping labels +* `__meta_marathon_port_index`: the port index number (e.g. `1` for `PORT1`) + +See below for the configuration options for Marathon discovery: + +```yaml +# List of URLs to be used to contact Marathon servers. +# You need to provide at least one server URL. +servers: + - + +# Polling interval +[ refresh_interval: | default = 30s ] + +# Optional authentication information for token-based authentication +# https://docs.mesosphere.com/1.11/security/ent/iam-api/#passing-an-authentication-token +# It is mutually exclusive with `auth_token_file` and other authentication mechanisms. +[ auth_token: ] + +# Optional authentication information for token-based authentication +# https://docs.mesosphere.com/1.11/security/ent/iam-api/#passing-an-authentication-token +# It is mutually exclusive with `auth_token` and other authentication mechanisms. +[ auth_token_file: ] + +# Sets the `Authorization` header on every request with the +# configured username and password. +# This is mutually exclusive with other authentication mechanisms. +# password and password_file are mutually exclusive. +basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + +# Sets the `Authorization` header on every request with +# the configured bearer token. It is mutually exclusive with `bearer_token_file` and other authentication mechanisms. +# NOTE: The current version of DC/OS marathon (v1.11.0) does not support standard Bearer token authentication. Use `auth_token` instead. 
+[ bearer_token: ] + +# Sets the `Authorization` header on every request with the bearer token +# read from the configured file. It is mutually exclusive with `bearer_token` and other authentication mechanisms. +# NOTE: The current version of DC/OS marathon (v1.11.0) does not support standard Bearer token authentication. Use `auth_token_file` instead. +[ bearer_token_file: /path/to/bearer/token/file ] + +# TLS configuration for connecting to marathon servers +tls_config: + [ ] + +# Optional proxy URL. +[ proxy_url: ] +``` + +By default every app listed in Marathon will be scraped by Prometheus. If not all +of your services provide Prometheus metrics, you can use a Marathon label and +Prometheus relabeling to control which instances will actually be scraped. Also +by default all apps will show up as a single job in Prometheus (the one specified +in the configuration file), which can also be changed using relabeling. + +### `` + +Nerve SD configurations allow retrieving scrape targets from [AirBnB's Nerve] +(https://github.com/airbnb/nerve) which are stored in +[Zookeeper](https://zookeeper.apache.org/). + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_nerve_path`: the full path to the endpoint node in Zookeeper +* `__meta_nerve_endpoint_host`: the host of the endpoint +* `__meta_nerve_endpoint_port`: the port of the endpoint +* `__meta_nerve_endpoint_name`: the name of the endpoint + +```yaml +# The Zookeeper servers. +servers: + - +# Paths can point to a single service, or the root of a tree of services. +paths: + - +[ timeout: | default = 10s ] +``` + +### `` + +Serverset SD configurations allow retrieving scrape targets from [Serversets] +(https://github.com/twitter/finagle/tree/master/finagle-serversets) which are +stored in [Zookeeper](https://zookeeper.apache.org/). Serversets are commonly +used by [Finagle](https://twitter.github.io/finagle/) and +[Aurora](http://aurora.apache.org/). 
+ +The following meta labels are available on targets during relabeling: + +* `__meta_serverset_path`: the full path to the serverset member node in Zookeeper +* `__meta_serverset_endpoint_host`: the host of the default endpoint +* `__meta_serverset_endpoint_port`: the port of the default endpoint +* `__meta_serverset_endpoint_host_`: the host of the given endpoint +* `__meta_serverset_endpoint_port_`: the port of the given endpoint +* `__meta_serverset_shard`: the shard number of the member +* `__meta_serverset_status`: the status of the member + +```yaml +# The Zookeeper servers. +servers: + - +# Paths can point to a single serverset, or the root of a tree of serversets. +paths: + - +[ timeout: | default = 10s ] +``` + +Serverset data must be in the JSON format, the Thrift format is not currently supported. + +### `` + +[Triton](https://github.com/joyent/triton) SD configurations allow retrieving +scrape targets from [Container Monitor](https://github.com/joyent/rfd/blob/master/rfd/0027/README.md) +discovery endpoints. + +The following meta labels are available on targets during relabeling: + +* `__meta_triton_machine_id`: the UUID of the target container +* `__meta_triton_machine_alias`: the alias of the target container +* `__meta_triton_machine_image`: the target containers image type +* `__meta_triton_machine_server_id`: the server UUID for the target container + +```yaml +# The information to access the Triton discovery API. + +# The account to use for discovering new target containers. +account: + +# The DNS suffix which should be applied to target containers. +dns_suffix: + +# The Triton discovery endpoint (e.g. 'cmon.us-east-3b.triton.zone'). This is +# often the same value as dns_suffix. +endpoint: + +# The port to use for discovery and metric scraping. +[ port: | default = 9163 ] + +# The interval which should be used for refreshing target containers. +[ refresh_interval: | default = 60s ] + +# The Triton discovery API version. 
+[ version: | default = 1 ] + +# TLS configuration. +tls_config: + [ ] +``` + +### `` + +A `static_config` allows specifying a list of targets and a common label set +for them. It is the canonical way to specify static targets in a scrape +configuration. + +```yaml +# The targets specified by the static config. +targets: + [ - '' ] + +# Labels assigned to all metrics scraped from the targets. +labels: + [ : ... ] +``` + +### `` + +Relabeling is a powerful tool to dynamically rewrite the label set of a target before +it gets scraped. Multiple relabeling steps can be configured per scrape configuration. +They are applied to the label set of each target in order of their appearance +in the configuration file. + +Initially, aside from the configured per-target labels, a target's `job` +label is set to the `job_name` value of the respective scrape configuration. +The `__address__` label is set to the `:` address of the target. +After relabeling, the `instance` label is set to the value of `__address__` by default if +it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels +are set to the scheme and metrics path of the target respectively. The `__param_` +label is set to the value of the first passed URL parameter called ``. + +Additional labels prefixed with `__meta_` may be available during the +relabeling phase. They are set by the service discovery mechanism that provided +the target and vary between mechanisms. + +Labels starting with `__` will be removed from the label set after relabeling is completed. + +If a relabeling step needs to store a label value only temporarily (as the +input to a subsequent relabeling step), use the `__tmp` label name prefix. This +prefix is guaranteed to never be used by Prometheus itself. + +```yaml +# The source labels select values from existing labels. 
Their content is concatenated +# using the configured separator and matched against the configured regular expression +# for the replace, keep, and drop actions. +[ source_labels: '[' [, ...] ']' ] + +# Separator placed between concatenated source label values. +[ separator: | default = ; ] + +# Label to which the resulting value is written in a replace action. +# It is mandatory for replace actions. Regex capture groups are available. +[ target_label: ] + +# Regular expression against which the extracted value is matched. +[ regex: | default = (.*) ] + +# Modulus to take of the hash of the source label values. +[ modulus: ] + +# Replacement value against which a regex replace is performed if the +# regular expression matches. Regex capture groups are available. +[ replacement: | default = $1 ] + +# Action to perform based on regex matching. +[ action: | default = replace ] +``` + +`` is any valid +[RE2 regular expression](https://github.com/google/re2/wiki/Syntax). It is +required for the `replace`, `keep`, `drop`, `labelmap`,`labeldrop` and `labelkeep` actions. The regex is +anchored on both ends. To un-anchor the regex, use `.*.*`. + +`` determines the relabeling action to take: + +* `replace`: Match `regex` against the concatenated `source_labels`. Then, set + `target_label` to `replacement`, with match group references + (`${1}`, `${2}`, ...) in `replacement` substituted by their value. If `regex` + does not match, no replacement takes place. +* `keep`: Drop targets for which `regex` does not match the concatenated `source_labels`. +* `drop`: Drop targets for which `regex` matches the concatenated `source_labels`. +* `hashmod`: Set `target_label` to the `modulus` of a hash of the concatenated `source_labels`. +* `labelmap`: Match `regex` against all label names. Then copy the values of the matching labels + to label names given by `replacement` with match group references + (`${1}`, `${2}`, ...) in `replacement` substituted by their value. 
+* `labeldrop`: Match `regex` against all label names. Any label that matches will be + removed from the set of labels. +* `labelkeep`: Match `regex` against all label names. Any label that does not match will be + removed from the set of labels. + +Care must be taken with `labeldrop` and `labelkeep` to ensure that metrics are still uniquely labeled +once the labels are removed. + +### `` + +Metric relabeling is applied to samples as the last step before ingestion. It +has the same configuration format and actions as target relabeling. Metric +relabeling does not apply to automatically generated timeseries such as `up`. + +One use for this is to blacklist time series that are too expensive to ingest. + +### `` + +Alert relabeling is applied to alerts before they are sent to the Alertmanager. +It has the same configuration format and actions as target relabeling. Alert +relabeling is applied after external labels. + +One use for this is ensuring a HA pair of Prometheus servers with different +external labels send identical alerts. + +### `` + +An `alertmanager_config` section specifies Alertmanager instances the Prometheus server sends +alerts to. It also provides parameters to configure how to communicate with these Alertmanagers. + +Alertmanagers may be statically configured via the `static_configs` parameter or +dynamically discovered using one of the supported service-discovery mechanisms. + +Additionally, `relabel_configs` allow selecting Alertmanagers from discovered +entities and provide advanced modifications to the used API path, which is exposed +through the `__alerts_path__` label. + +```yaml +# Per-target Alertmanager timeout when pushing alerts. +[ timeout: | default = 10s ] + +# Prefix for the HTTP path alerts are pushed to. +[ path_prefix: | default = / ] + +# Configures the protocol scheme used for requests. +[ scheme: | default = http ] + +# Sets the `Authorization` header on every request with the +# configured username and password. 
+# password and password_file are mutually exclusive. +basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + +# Sets the `Authorization` header on every request with +# the configured bearer token. It is mutually exclusive with `bearer_token_file`. +[ bearer_token: ] + +# Sets the `Authorization` header on every request with the bearer token +# read from the configured file. It is mutually exclusive with `bearer_token`. +[ bearer_token_file: /path/to/bearer/token/file ] + +# Configures the scrape request's TLS settings. +tls_config: + [ ] + +# Optional proxy URL. +[ proxy_url: ] + +# List of Azure service discovery configurations. +azure_sd_configs: + [ - ... ] + +# List of Consul service discovery configurations. +consul_sd_configs: + [ - ... ] + +# List of DNS service discovery configurations. +dns_sd_configs: + [ - ... ] + +# List of EC2 service discovery configurations. +ec2_sd_configs: + [ - ... ] + +# List of file service discovery configurations. +file_sd_configs: + [ - ... ] + +# List of GCE service discovery configurations. +gce_sd_configs: + [ - ... ] + +# List of Kubernetes service discovery configurations. +kubernetes_sd_configs: + [ - ... ] + +# List of Marathon service discovery configurations. +marathon_sd_configs: + [ - ... ] + +# List of AirBnB's Nerve service discovery configurations. +nerve_sd_configs: + [ - ... ] + +# List of Zookeeper Serverset service discovery configurations. +serverset_sd_configs: + [ - ... ] + +# List of Triton service discovery configurations. +triton_sd_configs: + [ - ... ] + +# List of labeled statically configured Alertmanagers. +static_configs: + [ - ... ] + +# List of Alertmanager relabel configurations. +relabel_configs: + [ - ... ] +``` + +### `` + +`write_relabel_configs` is relabeling applied to samples before sending them +to the remote endpoint. Write relabeling is applied after external labels. This +could be used to limit which samples are sent. 
+ +There is a [small demo](/documentation/examples/remote_storage) of how to use +this functionality. + +```yaml +# The URL of the endpoint to send samples to. +url: + +# Timeout for requests to the remote write endpoint. +[ remote_timeout: | default = 30s ] + +# List of remote write relabel configurations. +write_relabel_configs: + [ - ... ] + +# Sets the `Authorization` header on every remote write request with the +# configured username and password. +# password and password_file are mutually exclusive. +basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + +# Sets the `Authorization` header on every remote write request with +# the configured bearer token. It is mutually exclusive with `bearer_token_file`. +[ bearer_token: ] + +# Sets the `Authorization` header on every remote write request with the bearer token +# read from the configured file. It is mutually exclusive with `bearer_token`. +[ bearer_token_file: /path/to/bearer/token/file ] + +# Configures the remote write request's TLS settings. +tls_config: + [ ] + +# Optional proxy URL. +[ proxy_url: ] + +# Configures the queue used to write to remote storage. +queue_config: + # Number of samples to buffer per shard before we start dropping them. + [ capacity: | default = 100000 ] + # Maximum number of shards, i.e. amount of concurrency. + [ max_shards: | default = 1000 ] + # Maximum number of samples per send. + [ max_samples_per_send: | default = 100] + # Maximum time a sample will wait in buffer. + [ batch_send_deadline: | default = 5s ] + # Maximum number of times to retry a batch on recoverable errors. + [ max_retries: | default = 10 ] + # Initial retry delay. Gets doubled for every retry. + [ min_backoff: | default = 30ms ] + # Maximum retry delay. + [ max_backoff: | default = 100ms ] + +``` + +There is a list of +[integrations](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) +with this feature. + +### `` + +```yaml +# The URL of the endpoint to query from. 
+url: + +# An optional list of equality matchers which have to be +# present in a selector to query the remote read endpoint. +required_matchers: + [ : ... ] + +# Timeout for requests to the remote read endpoint. +[ remote_timeout: | default = 1m ] + +# Whether reads should be made for queries for time ranges that +# the local storage should have complete data for. +[ read_recent: | default = false ] + +# Sets the `Authorization` header on every remote read request with the +# configured username and password. +# password and password_file are mutually exclusive. +basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + +# Sets the `Authorization` header on every remote read request with +# the configured bearer token. It is mutually exclusive with `bearer_token_file`. +[ bearer_token: ] + +# Sets the `Authorization` header on every remote read request with the bearer token +# read from the configured file. It is mutually exclusive with `bearer_token`. +[ bearer_token_file: /path/to/bearer/token/file ] + +# Configures the remote read request's TLS settings. +tls_config: + [ ] + +# Optional proxy URL. +[ proxy_url: ] +``` + +There is a list of +[integrations](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) +with this feature. 
diff --git a/src/prometheus/docs/configuration/index.md b/src/prometheus/docs/configuration/index.md new file mode 100644 index 0000000..5cfaf2a --- /dev/null +++ b/src/prometheus/docs/configuration/index.md @@ -0,0 +1,4 @@ +--- +title: Configuration +sort_rank: 3 +--- diff --git a/src/prometheus/docs/configuration/recording_rules.md b/src/prometheus/docs/configuration/recording_rules.md new file mode 100644 index 0000000..78aa0ce --- /dev/null +++ b/src/prometheus/docs/configuration/recording_rules.md @@ -0,0 +1,120 @@ +--- +title: Recording rules +sort_rank: 2 +--- + +# Defining recording rules + +## Configuring rules + +Prometheus supports two types of rules which may be configured and then +evaluated at regular intervals: recording rules and [alerting +rules](alerting_rules.md). To include rules in Prometheus, create a file +containing the necessary rule statements and have Prometheus load the file via +the `rule_files` field in the [Prometheus configuration](configuration.md). +Rule files use YAML. + +The rule files can be reloaded at runtime by sending `SIGHUP` to the Prometheus +process. The changes are only applied if all rule files are well-formatted. + +## Syntax-checking rules + +To quickly check whether a rule file is syntactically correct without starting +a Prometheus server, install and run Prometheus's `promtool` command-line +utility tool: + +```bash +go get github.com/prometheus/prometheus/cmd/promtool +promtool check rules /path/to/example.rules.yml +``` + +When the file is syntactically valid, the checker prints a textual +representation of the parsed rules to standard output and then exits with +a `0` return status. + +If there are any syntax errors or invalid input arguments, it prints an error +message to standard error and exits with a `1` return status. + +## Recording rules + +Recording rules allow you to precompute frequently needed or computationally +expensive expressions and save their result as a new set of time series. 
+Querying the precomputed result will then often be much faster than executing +the original expression every time it is needed. This is especially useful for +dashboards, which need to query the same expression repeatedly every time they +refresh. + +Recording and alerting rules exist in a rule group. Rules within a group are +run sequentially at a regular interval. + +The syntax of a rule file is: + +```yaml +groups: + [ - ] +``` + +A simple example rules file would be: + +```yaml +groups: + - name: example + rules: + - record: job:http_inprogress_requests:sum + expr: sum(http_inprogress_requests) by (job) +``` + +### `` +``` +# The name of the group. Must be unique within a file. +name: + +# How often rules in the group are evaluated. +[ interval: | default = global.evaluation_interval ] + +rules: + [ - ... ] +``` + +### `` + +The syntax for recording rules is: + +``` +# The name of the time series to output to. Must be a valid metric name. +record: + +# The PromQL expression to evaluate. Every evaluation cycle this is +# evaluated at the current time, and the result recorded as a new set of +# time series with the metric name as given by 'record'. +expr: + +# Labels to add or overwrite before storing the result. +labels: + [ : ] +``` + +The syntax for alerting rules is: + +``` +# The name of the alert. Must be a valid metric name. +alert: + +# The PromQL expression to evaluate. Every evaluation cycle this is +# evaluated at the current time, and all resultant time series become +# pending/firing alerts. +expr: + +# Alerts are considered firing once they have been returned for this long. +# Alerts which have not yet fired for long enough are considered pending. +[ for: | default = 0s ] + +# Labels to add or overwrite for each alert. +labels: + [ : ] + +# Annotations to add to each alert. 
+annotations: + [ : ] +``` + diff --git a/src/prometheus/docs/configuration/template_examples.md b/src/prometheus/docs/configuration/template_examples.md new file mode 100644 index 0000000..0fe7e2e --- /dev/null +++ b/src/prometheus/docs/configuration/template_examples.md @@ -0,0 +1,116 @@ +--- +title: Template examples +sort_rank: 4 +--- + +# Template examples + +Prometheus supports templating in the annotations and labels of alerts, +as well as in served console pages. Templates have the ability to run +queries against the local database, iterate over data, use conditionals, +format data, etc. The Prometheus templating language is based on the [Go +templating](http://golang.org/pkg/text/template/) system. + +## Simple alert field templates + +``` +alert: InstanceDown +expr: up == 0 +for: 5m +labels: + - severity: page +annotations: + summary: "Instance {{$labels.instance}} down" + description: "{{$labels.instance}} of job {{$labels.job}} has been down for more than 5 minutes." +``` + +Alert field templates will be executed during every rule iteration for each +alert that fires, so keep any queries and templates lightweight. If you have a +need for more complicated templates for alerts, it is recommended to link to a +console instead. + +## Simple iteration + +This displays a list of instances, and whether they are up: + +```go +{{ range query "up" }} + {{ .Labels.instance }} {{ .Value }} +{{ end }} +``` + +The special `.` variable contains the value of the current sample for each loop iteration. + +## Display one value + +```go +{{ with query "some_metric{instance='someinstance'}" }} + {{ . | first | value | humanize }} +{{ end }} +``` + +Go and Go's templating language are both strongly typed, so one must check that +samples were returned to avoid an execution error. For example this could +happen if a scrape or rule evaluation has not run yet, or a host was down. 
+ +The included `prom_query_drilldown` template handles this, allows for +formatting of results, and linking to the [expression browser](https://prometheus.io/docs/visualization/browser/). + +## Using console URL parameters + +```go +{{ with printf "node_memory_MemTotal{job='node',instance='%s'}" .Params.instance | query }} + {{ . | first | value | humanize1024}}B +{{ end }} +``` + +If accessed as `console.html?instance=hostname`, `.Params.instance` will evaluate to `hostname`. + +## Advanced iteration + +```html +
    PrometheusUpIngested SamplesMemory
    {{ .Labels.instance }}Yes{{ else }} class="alert-danger">No{{ end }}{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024")}}
    No devices found.
    +{{ range printf "node_network_receive_bytes{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device"}} + + + + + + + + + {{ end }} +
    {{ .Labels.device }}
    Received{{ with printf "rate(node_network_receive_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device | query }}{{ . | first | value | humanize }}B/s{{end}}
    Transmitted{{ with printf "rate(node_network_transmit_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device | query }}{{ . | first | value | humanize }}B/s{{end}}
    +``` + +Here we iterate over all network devices and display the network traffic for each. + +As the `range` action does not specify a variable, `.Params.instance` is not +available inside the loop as `.` is now the loop variable. + +## Defining reusable templates + +Prometheus supports defining templates that can be reused. This is particularly +powerful when combined with +[console library](template_reference.md#console-templates) support, allowing +sharing of templates across consoles. + +```go +{{/* Define the template */}} +{{define "myTemplate"}} + do something +{{end}} + +{{/* Use the template */}} +{{template "myTemplate"}} +``` + +Templates are limited to one argument. The `args` function can be used to wrap multiple arguments. + +```go +{{define "myMultiArgTemplate"}} + First argument: {{.arg0}} + Second argument: {{.arg1}} +{{end}} +{{template "myMultiArgTemplate" (args 1 2)}} +``` diff --git a/src/prometheus/docs/configuration/template_reference.md b/src/prometheus/docs/configuration/template_reference.md new file mode 100644 index 0000000..fada2ca --- /dev/null +++ b/src/prometheus/docs/configuration/template_reference.md @@ -0,0 +1,114 @@ +--- +title: Template reference +sort_rank: 5 +--- + +# Template reference + +Prometheus supports templating in the annotations and labels of alerts, +as well as in served console pages. Templates have the ability to run +queries against the local database, iterate over data, use conditionals, +format data, etc. The Prometheus templating language is based on the [Go +templating](http://golang.org/pkg/text/template/) system. + +## Data Structures + +The primary data structure for dealing with time series data is the sample, defined as: + +```go +type sample struct { + Labels map[string]string + Value float64 +} +``` + +The metric name of the sample is encoded in a special `__name__` label in the `Labels` map. + +`[]sample` means a list of samples. + +`interface{}` in Go is similar to a void pointer in C. 
+ +## Functions + +In addition to the [default +functions](http://golang.org/pkg/text/template/#hdr-Functions) provided by Go +templating, Prometheus provides functions for easier processing of query +results in templates. + +If functions are used in a pipeline, the pipeline value is passed as the last argument. + +### Queries + +| Name | Arguments | Returns | Notes | +| ------------- | ------------- | -------- | -------- | +| query | query string | []sample | Queries the database, does not support returning range vectors. | +| first | []sample | sample | Equivalent to `index a 0` | +| label | label, sample | string | Equivalent to `index sample.Labels label` | +| value | sample | float64 | Equivalent to `sample.Value` | +| sortByLabel | label, []samples | []sample | Sorts the samples by the given label. Is stable. | + +`first`, `label` and `value` are intended to make query results easily usable in pipelines. + +### Numbers + +| Name | Arguments | Returns | Notes | +| ------------- | --------------| --------| --------- | +| humanize | number | string | Converts a number to a more readable format, using [metric prefixes](http://en.wikipedia.org/wiki/Metric_prefix). +| humanize1024 | number | string | Like `humanize`, but uses 1024 as the base rather than 1000. | +| humanizeDuration | number | string | Converts a duration in seconds to a more readable format. | +| humanizeTimestamp | number | string | Converts a Unix timestamp in seconds to a more readable format. | + +Humanizing functions are intended to produce reasonable output for consumption +by humans, and are not guaranteed to return the same results between Prometheus +versions. 
+ +### Strings + +| Name | Arguments | Returns | Notes | +| ------------- | ------------- | ------- | ----------- | +| title | string | string | [strings.Title](http://golang.org/pkg/strings/#Title), capitalises first character of each word.| +| toUpper | string | string | [strings.ToUpper](http://golang.org/pkg/strings/#ToUpper), converts all characters to upper case.| +| toLower | string | string | [strings.ToLower](http://golang.org/pkg/strings/#ToLower), converts all characters to lower case.| +| match | pattern, text | boolean | [regexp.MatchString](http://golang.org/pkg/regexp/#MatchString) Tests for a unanchored regexp match. | +| reReplaceAll | pattern, replacement, text | string | [Regexp.ReplaceAllString](http://golang.org/pkg/regexp/#Regexp.ReplaceAllString) Regexp substitution, unanchored. | +| graphLink | expr | string | Returns path to graph view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. | +| tableLink | expr | string | Returns path to tabular ("Console") view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. | + +### Others + +| Name | Arguments | Returns | Notes | +| ------------- | ------------- | ------- | ----------- | +| args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. | +| tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. | +| safeHtml | string | string | Marks string as HTML not requiring auto-escaping. | + +## Template type differences + +Each of the types of templates provide different information that can be used to +parameterize templates, and have a few other differences. 
+ +### Alert field templates + +`.Value` and `.Labels` contain the alert value and labels. They are also exposed +as the `$value` and `$labels` variables for convenience. + +### Console templates + +Consoles are exposed on `/consoles/`, and sourced from the directory pointed to +by the `-web.console.templates` flag. + +Console templates are rendered with +[html/template](http://golang.org/pkg/html/template/), which provides +auto-escaping. To bypass the auto-escaping use the `safe*` functions. + +URL parameters are available as a map in `.Params`. To access multiple URL +parameters by the same name, `.RawParams` is a map of the list values for each +parameter. The URL path is available in `.Path`, excluding the `/consoles/` +prefix. + +Consoles also have access to all the templates defined with `{{define +"templateName"}}...{{end}}` found in `*.lib` files in the directory pointed to +by the `-web.console.libraries` flag. As this is a shared namespace, take care +to avoid clashes with other users. Template names beginning with `prom`, +`_prom`, and `__` are reserved for use by Prometheus, as are the functions +listed above. diff --git a/src/prometheus/docs/federation.md b/src/prometheus/docs/federation.md new file mode 100644 index 0000000..283f044 --- /dev/null +++ b/src/prometheus/docs/federation.md @@ -0,0 +1,81 @@ +--- +title: Federation +sort_rank: 6 +--- + +# Federation + +Federation allows a Prometheus server to scrape selected time series from +another Prometheus server. + +## Use cases + +There are different use cases for federation. Commonly, it is used to either +achieve scalable Prometheus monitoring setups or to pull related metrics from +one service's Prometheus into another. + +### Hierarchical federation + +Hierarchical federation allows Prometheus to scale to environments with tens of +data centers and millions of nodes. 
In this use case, the federation topology +resembles a tree, with higher-level Prometheus servers collecting aggregated +time series data from a larger number of subordinated servers. + +For example, a setup might consist of many per-datacenter Prometheus servers +that collect data in high detail (instance-level drill-down), and a set of +global Prometheus servers which collect and store only aggregated data +(job-level drill-down) from those local servers. This provides an aggregate +global view and detailed local views. + +### Cross-service federation + +In cross-service federation, a Prometheus server of one service is configured +to scrape selected data from another service's Prometheus server to enable +alerting and queries against both datasets within a single server. + +For example, a cluster scheduler running multiple services might expose +resource usage information (like memory and CPU usage) about service instances +running on the cluster. On the other hand, a service running on that cluster +will only expose application-specific service metrics. Often, these two sets of +metrics are scraped by separate Prometheus servers. Using federation, the +Prometheus server containing service-level metrics may pull in the cluster +resource usage metrics about its specific service from the cluster Prometheus, +so that both sets of metrics can be used within that server. + +## Configuring federation + +On any given Prometheus server, the `/federate` endpoint allows retrieving the +current value for a selected set of time series in that server. At least one +`match[]` URL parameter must be specified to select the series to expose. Each +`match[]` argument needs to specify an +[instant vector selector](querying/basics.md#instant-vector-selectors) like +`up` or `{job="api-server"}`. If multiple `match[]` parameters are provided, +the union of all matched series is selected. 
+ +To federate metrics from one server to another, configure your destination +Prometheus server to scrape from the `/federate` endpoint of a source server, +while also enabling the `honor_labels` scrape option (to not overwrite any +labels exposed by the source server) and passing in the desired `match[]` +parameters. For example, the following `scrape_config` federates any series +with the label `job="prometheus"` or a metric name starting with `job:` from +the Prometheus servers at `source-prometheus-{1,2,3}:9090` into the scraping +Prometheus: + +```yaml +- job_name: 'federate' + scrape_interval: 15s + + honor_labels: true + metrics_path: '/federate' + + params: + 'match[]': + - '{job="prometheus"}' + - '{__name__=~"job:.*"}' + + static_configs: + - targets: + - 'source-prometheus-1:9090' + - 'source-prometheus-2:9090' + - 'source-prometheus-3:9090' +``` diff --git a/src/prometheus/docs/getting_started.md b/src/prometheus/docs/getting_started.md new file mode 100644 index 0000000..5414c47 --- /dev/null +++ b/src/prometheus/docs/getting_started.md @@ -0,0 +1,267 @@ +--- +title: Getting started +sort_rank: 1 +--- + +# Getting started + +This guide is a "Hello World"-style tutorial which shows how to install, +configure, and use Prometheus in a simple example setup. You will download and run +Prometheus locally, configure it to scrape itself and an example application, +and then work with queries, rules, and graphs to make use of the collected time +series data. + +## Downloading and running Prometheus + +[Download the latest release](https://prometheus.io/download) of Prometheus for +your platform, then extract and run it: + +```bash +tar xvfz prometheus-*.tar.gz +cd prometheus-* +``` + +Before starting Prometheus, let's configure it. + +## Configuring Prometheus to monitor itself + +Prometheus collects metrics from monitored targets by scraping metrics HTTP +endpoints on these targets. 
Since Prometheus also exposes data in the same +manner about itself, it can also scrape and monitor its own health. + +While a Prometheus server that collects only data about itself is not very +useful in practice, it is a good starting example. Save the following basic +Prometheus configuration as a file named `prometheus.yml`: + +```yaml +global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'codelab-monitor' + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + static_configs: + - targets: ['localhost:9090'] +``` + +For a complete specification of configuration options, see the +[configuration documentation](configuration/configuration.md). + +## Starting Prometheus + +To start Prometheus with your newly created configuration file, change to the +directory containing the Prometheus binary and run: + +```bash +# Start Prometheus. +# By default, Prometheus stores its database in ./data (flag --storage.tsdb.path). +./prometheus --config.file=prometheus.yml +``` + +Prometheus should start up. You should also be able to browse to a status page +about itself at [localhost:9090](http://localhost:9090). Give it a couple of +seconds to collect data about itself from its own HTTP metrics endpoint. + +You can also verify that Prometheus is serving metrics about itself by +navigating to its metrics endpoint: +[localhost:9090/metrics](http://localhost:9090/metrics) + +## Using the expression browser + +Let us try looking at some data that Prometheus has collected about itself. 
To +use Prometheus's built-in expression browser, navigate to +http://localhost:9090/graph and choose the "Console" view within the "Graph" +tab. + +As you can gather from [localhost:9090/metrics](http://localhost:9090/metrics), +one metric that Prometheus exports about itself is called +`prometheus_target_interval_length_seconds` (the actual amount of time between +target scrapes). Go ahead and enter this into the expression console: + +``` +prometheus_target_interval_length_seconds +``` + +This should return a number of different time series (along with the latest value +recorded for each), all with the metric name +`prometheus_target_interval_length_seconds`, but with different labels. These +labels designate different latency percentiles and target group intervals. + +If we were only interested in the 99th percentile latencies, we could use this +query to retrieve that information: + +``` +prometheus_target_interval_length_seconds{quantile="0.99"} +``` + +To count the number of returned time series, you could write: + +``` +count(prometheus_target_interval_length_seconds) +``` + +For more about the expression language, see the +[expression language documentation](querying/basics.md). + +## Using the graphing interface + +To graph expressions, navigate to http://localhost:9090/graph and use the "Graph" +tab. + +For example, enter the following expression to graph the per-second rate of chunks +being created in the self-scraped Prometheus: + +``` +rate(prometheus_tsdb_head_chunks_created_total[1m]) +``` + +Experiment with the graph range parameters and other settings. + +## Starting up some sample targets + +Let us make this more interesting and start some example targets for Prometheus +to scrape. + +The Go client library includes an example which exports fictional RPC latencies +for three services with different latency distributions. 
+ +Ensure you have the [Go compiler installed](https://golang.org/doc/install) and +have a [working Go build environment](https://golang.org/doc/code.html) (with +correct `GOPATH`) set up. + +Download the Go client library for Prometheus and run three of these example +processes: + +```bash +# Fetch the client library code and compile example. +git clone https://github.com/prometheus/client_golang.git +cd client_golang/examples/random +go get -d +go build + +# Start 3 example targets in separate terminals: +./random -listen-address=:8080 +./random -listen-address=:8081 +./random -listen-address=:8082 +``` + +You should now have example targets listening on http://localhost:8080/metrics, +http://localhost:8081/metrics, and http://localhost:8082/metrics. + +## Configuring Prometheus to monitor the sample targets + +Now we will configure Prometheus to scrape these new targets. Let's group all +three endpoints into one job called `example-random`. However, imagine that the +first two endpoints are production targets, while the third one represents a +canary instance. To model this in Prometheus, we can add several groups of +endpoints to a single job, adding extra labels to each group of targets. In +this example, we will add the `group="production"` label to the first group of +targets, while adding `group="canary"` to the second. + +To achieve this, add the following job definition to the `scrape_configs` +section in your `prometheus.yml` and restart your Prometheus instance: + +```yaml +scrape_configs: + - job_name: 'example-random' + + # Override the global default and scrape targets from this job every 5 seconds. 
+ scrape_interval: 5s + + static_configs: + - targets: ['localhost:8080', 'localhost:8081'] + labels: + group: 'production' + + - targets: ['localhost:8082'] + labels: + group: 'canary' +``` + +Go to the expression browser and verify that Prometheus now has information +about time series that these example endpoints expose, such as the +`rpc_durations_seconds` metric. + +## Configure rules for aggregating scraped data into new time series + +Though not a problem in our example, queries that aggregate over thousands of +time series can get slow when computed ad-hoc. To make this more efficient, +Prometheus allows you to prerecord expressions into completely new persisted +time series via configured recording rules. Let's say we are interested in +recording the per-second rate of example RPCs +(`rpc_durations_seconds_count`) averaged over all instances (but +preserving the `job` and `service` dimensions) as measured over a window of 5 +minutes. We could write this as: + +``` +avg(rate(rpc_durations_seconds_count[5m])) by (job, service) +``` + +Try graphing this expression. + +To record the time series resulting from this expression into a new metric +called `job_service:rpc_durations_seconds_count:avg_rate5m`, create a file +with the following recording rule and save it as `prometheus.rules.yml`: + +``` +groups: +- name: example + rules: + - record: job_service:rpc_durations_seconds_count:avg_rate5m + expr: avg(rate(rpc_durations_seconds_count[5m])) by (job, service) +``` + +To make Prometheus pick up this new rule, add a `rule_files` statement to the +`global` configuration section in your `prometheus.yml`. The config should now +look like this: + +```yaml +global: + scrape_interval: 15s # By default, scrape targets every 15 seconds. + evaluation_interval: 15s # Evaluate rules every 15 seconds. + + # Attach these extra labels to all timeseries collected by this Prometheus instance. 
+ external_labels: + monitor: 'codelab-monitor' + +rule_files: + - 'prometheus.rules.yml' + +scrape_configs: + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'example-random' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + static_configs: + - targets: ['localhost:8080', 'localhost:8081'] + labels: + group: 'production' + + - targets: ['localhost:8082'] + labels: + group: 'canary' +``` + +Restart Prometheus with the new configuration and verify that a new time series +with the metric name `job_service:rpc_durations_seconds_count:avg_rate5m` +is now available by querying it through the expression browser or graphing it. diff --git a/src/prometheus/docs/images/remote_integrations.png b/src/prometheus/docs/images/remote_integrations.png new file mode 100644 index 0000000000000000000000000000000000000000..ae263d136d81b17c0512dae941232911bd0d290e GIT binary patch literal 14508 zcmb8WbwE^K*EW2H25F>QB&55fkx)`WYUs{k5JVViXe32xq(nez1Y}4DkdW@0k&qUU z?s~`jx$ob5Ki~V;_s=;K`|Q2;I%}`J*0rzoT3=U#gouF%005H5nvb3V09FI$vo!%8 z=87+4_5%RefX9!N4gD;4a`1f&yJ)+=>A-;xn#uEJ;<7H^ zl}v#W@x6V9syo&aa1`(sYY)Gg9?lA918|QnIgh`|0~2^7caYD3d7u=#7t5Lq&P=+N z!i>BGT7YtVs5)ypu@VXJ0NAdVQ51sT2Pgp^kToB?6Hvf;!-Nb3UV&Dy7qQt2zWTGm zcbYuGh=ksuUjPyI87=Y&Ag7o0AGI!e{Z$kCZC?aHj00s=vI5jV?qf-RoPaX`X$%ll z-vQp^?=?rMDf}8Hvt|Oq{#XNI5}#Pc^yi3yBu2Lq@!?BsB=j1_P136&QB7&62W)&u z$|TBXh6@4i0938>$Q-~2TJYie+5!HgNhrL<{1cv2oW1NQLl~q=-P*|j7zAmL!7W*l zU?6POmEu1yRZa&K4RxzGYh~U2xkKi;8%V=vd06*4pf9kT2CeG6@igy4up2*()~AvI zzNo|e)1!ww1rAIe>Pi-!INvFNn^B}z%mB{f7t+UUb)qLHR6xWGFW~C3aWpA88lbi7 z#sJtz`c7ew5Gknv=FtPd9}odZpA`8b$eoI$RRUCpt2A=I_sb5R62MBrRuZ)-1^D17 zDG7W`a{B~oO(8*#DZsKiYu~X2TYMW9JaDc$ct`h}zXW^n6{q0t34Y6A`@MzK;#QvW zE4e5ZHQ0G&;17mI1CulZA*tZBBrDSW)02a@!(R+iU%qfMLf;{|^Ds*)2rC+Wh=rb) 
zL2#=5;(q-tx~n5WqDp%9{^rn2B02aZ1NnPl#PF79>e}+@8T#xjm&G)Ur{Ha_8O(_= z`Y73Zz*UY1`uA2i1};UQb~$DNF^uQ4RnnOrM#aQvGTk9gMI|NiY6sCJTaD<7GqQItj#CU)fLcrFWsmJ6KXpTV3nLIQno;$ZfmCa4aJr8PmFFA>qY<@K8B^p_vfSF@y?w%`c@A+Ork+%ZrCc2)p|cIX8s6|-WM3Bm z2~#3@Nb2Loc1RX!nPEtc^NE;Tu%rgGzvn!@!Nb1f&MkpS=V&a81uQTGXM`E43WOiA z;ya3;X{(lk8ko}O%_Tc_^hs7up#8#x-K2~m&U8~qMO!?yQ9?w{(G2Am7h0S1_tE7q z%k?@L>dJ`9Ee))nO393&pMvgq?Za_ zKg|k4P<~6IGjoV=&>0UBw#<{-jxbmF`D?tUH|lxj#cyevqmQxa>1v3ciL#?-%V8ZI z|~64hcK+u>k^CqDMvNhQV?b&?rNWJl`W203*+y!l8P%L5B~wVwk4ekRY!0JPzS z3$BPl1gKs5bj~>pCu;5`4q<4`<4(<{cMo#RH%7 zXZuEea5OQ=s4oQ}h#=2-R5U3e85BZ~ChEiwN0r+NS*YWC*olq|g~1^CBDCLe;=Fjb zHlg+@b@F;z2p!fjp0J}xe)I=<__8cbT1^F-%@H*}!8lR5Nt=lqTGKobp%k;yIN*n@ z#)heG3>KYAeSvCuR*#-ZS7(t5R1c;t_Ug{H?;{ElLU8n4cSTqQp2US=@!n-ueyjSc zCLzq(Q89%D{YC@#r`)5^1&%l$p0w6qiE<5z9jWf8Ov3K5QY#VQLXnBMY%h9IYRt3@ zEJ4I-n%h*oXuH12*I%D1wA@yB+6qY0Lj>umR}uo~@fQF=u72p!b`%`AyUd?RPP7-_CZM_v19 zW8(IGd$IZST>GdensHHBQ~5tTLFbk6Q9fLXU9uf-+bh2DWmZI*+MLlhcm~dOibuvD zub%=o=}RV8IfDx3uaT~PkXfNqu8$3yDMO&{ox^4xQK|J}^UG!9T;hgar`Z%YDyqWr z>crk`K^v#IzSwG^Hz9lWhbG#0CDt3s0%Om(()o$j%-{2vI3NXX7d7KJaC05i-t6{C zzg?K@R8WF7+*I24QrT_LvvkR!<4cMTP~1oO~691Iz(hRlHKAqEeEljC*+JF+(4a(xz*- zjzk@M9jO@(-5B3fV2*y@59NaSXX+*|Kk^+`DzC7%$A4>r>g8R{?GZVdqHIyDrs^nH z|K)%B{fr z^I9rI0)8C7flnG%Jcs>_)f!J*t$Ug-YYunCAuHF0NN!Ezfkc(K6^rplNf=e^(5ol^1it?)-!)n;KPIibLdX|q3 z50xzuky?gwtcmqfBVMKraP44Ze150%cZ{|`_n>pJ^RGsUE>Xk)K`1!=v0H8;E@ZHl z#`ZOs2dRiedkl5I=o@C6hRIady7PF)NyO(S?=wP7v*g6!nWI!f9kuRdJWctRpI$wH zGQJ!{ZxV-sUzt@n&ow#k%q5Vt{kaJi*GnJAgnf%H5(sXL3d3ouu^d~>|(EEn+CkVBYL)&??;)`Fq0~Y;bO~hR`u~ko-2yn)OXIY*2gV_NG zkU35@vC^mDaL!&WkoEJP$vM0>PUu!UE2EgM&f-*j*)RL-@7`PX%0&5n0jsq z$Fv20+|1)5uNql5`kaY3>51T+z1dr*LU+*J(`1I|LGYsFpSq;_WL$`yjw^ldHMRA? 
z4$%5p+ZOUOM7~fG`!o^^Q_sdS`NXa5xor$=Jmy?no35%gd6!;k)Scc2j3@;-6V@Mn zr_|?=h2&NGsgJC#^?M%emq(mX!8u{&n@!rDilL}a7Ev#J1~8_S&=^&`tGqaMq;(%J zO9S*H%{sR}CP)j-2#Euv3)R2{%IWNcMWBiCDlzlWJ(2C^@OacCva%=u zl}whjW;nyabA| zG(n5ZK*G`1A+Xo_`r=i0&%rJ3_M+s6Ws|7}CU~c>*1$C3SkLj6Jm}-J4H;I0r0sRU zu3#e59P1zM`xY1R(%!#ubo1n*guea0>JYJuIu2cEcF_NDUp1El!VH#F+6^t-hs7ES zJ)Bscr$rniID@=Y{w-0>2lz*EFgiWP{*c|(l!2O@sP`a)5pN)F5%S~>J# z$N@HiBX!a4wG$Bes26e2OvUhOacwI@VhE_(q6aN{v?N3_P0Ik#;PWz_OomNA zGSa)mi*w@xmeJQ!KEtl-%hPH_D0ft1&VGkeXjMu78;Z>_C)notcPTyItC~%vkBLDp zPD@O}L1R2E##5|~2P<(yn}YI~HC1$wRp=cG_AU=MS-QbQ@=do4>eAR4ajii1)AW>S z*c&+c70hTZEG65%54GI)C)n>8AALLe^@Y`F)7u9V-Vw8zcrr1TOQnm{*;Ed_L6SH` z%%{m>hxI%5)!)i_eA8i$!wz^=V7gT~(`qO7Vg*LU*>9-7yEA)E zJLWVHAD{WfcJe@t=l7|gQI&a^9-~C`q-)pQb1kpa69L0d&!|!t`dJ0hxw8IuZ{=^u z*xB`<&|AogkO_`j>}dOK9kVQXO!%ema;kmR2bEO@&6D45zh9#E=LNT;|Kwy9m<`JA z3EZ&oTUzo|f__#XCWvpokqD@iJ|Dx_DAlapEB#Q^%p_R-(EM*Y_sU~(0Ar%`3ou$B zG}Q%p&-F3rJ^&s5!{(^qSAO{LP(Y(+ztsnT00cE#;RWQ{54(f4Q3G$+rzp@v*)q6l zv8{0}=R`JrXG_kgB5s9J6rBLWJ()8*@z}Dk`dX z-`v!DkxW%yF_$&42DN)Xe0c@se=kO&cJ<`f;8mos0~+eHee>?O_f7fd3>INZ9W&qW zvj*HXur6Ev1qADB%5ym>%kw_g{6pis0`;8>zbb7tAIKy*nDo8IukVk^85m-pc{+6u zaBfj&^r&Hc`JG8Sf9+6|35|Kwp~>G_`?}=m4qPv#&60jpQvua>?tEwzt9G8$m3Rji z3HSOl@ZgA1a$g1^kSinrBV#CRDVeUaP;Ru9sX6SWUiE7A*?)f#bl6ragxslvCz}Yn z@uqpo#ym&xKT&+Pj%HClTVy05+fxJ1;UJAQ#xu-S$Y&$Ih? zPXt1Tw+uVDkH6_xH@_S=i)R>P^iCHO<%DD*Jhm3^|Wf4fKBz4fgHU^in{ABiD9ON)O}gR zZoW*NyLTLaZ4{KSp*~;g%~Y?E)faf|@kBjdZuB0`l?|=oxM1J-Gh$+v6lJmVB)ubtgMwD1yA0B_J$bu5X?=7KUxHTC@dgw;^P+d#b5{x><^ z`_&|chMjdthrk@8!6*_3iuRo=YVdP&zNLE8@ZU4Az0ZdV;`ecIzufQYMvf!At{EUd zS1gtDV|E|%-nUu!vqLB2k_b!iYLSJ0bZ3YgulM6ru z9jqv(2xKvPr+IWc)$`eQt%di6GZ_m^E%|P>1rd-qL@FZt`@?)b8*l^^Yww->T6^MM z_Z-|{)6wF;u}~m?u-a=wT4-w>iS!`;{?0%R#E6RqL53|_;>Fg(-w5jPwq(Wy1a^4R z3R$U^d0rH@YdoJw5^o^Ydb1kvG?ZZc!k+=HfVdTULem%1<>vcYdb&PDP;HB=o;iwB zn?;J)?G=-;$QLSgQsjPNw6JZ5_TBz$2Jdvwr}K**+0&neW8e#Er6e9l=(&y^{8ggD zQ=|xwQ5gLyyj}5SPiX7IWEnS7VkQ|*-KrlZ(I~^+)b4PNfp<5zX;NTG{wsg!7Y<8! 
zLkO4phdwIV7kM8#9&!r1w5!x@DFRz1y6QhV%I!DiF>xkQ{n9g7 zZIo&B!oj@Kb_dSdOcDe#FILskwi~N$j$hyw(u*mEyz9?E2{4*i|8aSEJxr*bTdxh; zi3x434%}z5?|Vyfeur9luQPh4(SxdZ$RadrH7G9}HUAT3+xc5@q!KJd?HO=#^fFnpSmkMXtcG7f~Ww!b^khy_j3{HpNBEBXDWkp~$Pe&jS&;3RFA@eWrK)sf?^ zv+6*9iG+maSIfPh$2Iro+&)=;CAJJ$r9N)*3Oum?QSEr?dsoPiAQ<-!e17^cU8vBg z@rCBQSeGk!b+vH$Y#VlokmaLx^G5x-1|Amq;CQQaLDfqT_GqOtws_@p?1!XGfp`tC zZmwyKJ<*R=PoLaVWpI=t{KncAe3oY*9r*ihX{unP{o{9~UMEZnVK0Ka_SE}QtgZMO zt|GHv{aGB`o_*T($jh%QtjBCok}?lR=F#1l2rS2FRN1sAyWJ@7C&LqFTm(2>SM7%_ zKi6^H&&E?B8h)$j?+UN=_Xf>*el0B0qf5nAz`jf0KYRDS7hx_n=E3#!6hgNJkd;aP zQL1S;T(TXJ*na54RHSuR9rM-h^oC<&KBP|8NwX^P6S(lhg)Yi_k>G?$I@a*P+vUR2 z)i-RRr?tWJ56tv)(gnQn&Zs3zJJeOKW~3i2N=h0gR*0B9$@ULCH#v)ZHs=@$c8c3y z2ua%CYriJABSj9;V|f^_pvt|tc3h^JEGj{H-~MevNrD>JnT%uy5sN%5=KkXh>E$lp z(?1?97p^Z}${EBQZ%h#PPt~e;qrXarqON4TZTsV0h8=S&-DOq=-qV9avO|w{)A1J+ zp;yv<)Tf#<|;qyDac^vpB1KEYhufL5esM(Bp&ntgdZPDtCTrJP-dCpU6=;h%RG<$?! z^a;WsWod$u2DE~U6X$m{bME0L@>_)6S5R0w880+7ILe{R4<@~BGa2mJ1FA?fVz3%t zd~nWzX}%NX#JG;i#b&ci(X%2UEwARUgy<$Ct4;YE9&29&vw{~_)6J1s;%z-SXQ_m8 ztJw{mVs*4x{%x+DGR19_wD%3=ywK*Iu)X}J8ug)tcEqSqztgP z0qtjfRuH&X-e01wcG7i+`H&r5g^vxnByn0%9{@x5ILggsS(i0Pzn=z$sBgYVwVih% z4v3*b48B#6?>@OQ3SzHB)lgw`=_yC6lZbrs=CQCuF-g~aw!K2*BZ}6PLlqVY-%kGEc2aK0R9e(@i1F;qt#}`KERbGf zaTN?lyzWBHs&M@g%0a@U-M-nEBGc; z+m_#9)*={YOPGH&Be^w(A7>0fu40$H;jvLUzMa!)wz1EvbY3_zz&JIW!RC|aP*km| zLHv6;#@SkXr5)GR(KVDZos@^dpPZR^E#u11d$=kO*pIJ@_t-5X3O4w}jP0At1&F4WGTFE|XC+hQ&JRGA8}nP3uCb z#XT$<&hVw=hPp9&Z@uH8a7_(hwSxthsE`np&5Aj86ua=pkr2NZrEcv>3=q7L2v{xE z%;x3v9mx8eSkm)y@FQ>)ba4Ob_oPOiugB<492cEr5MCRj3{mLen(gRZJ<&6I7Nud& zy=7~3Qveyt@g|l}3KDUrp^QscfBx5Mw&e0V-5e1+r4S}X?`|7KK1ok6f(FlTnY0cJ zQtkV-PRXL@Zam9iVv8*CC@bZ)pS~Vch?f;@V*yuBkps|xjhP2`d9BD;&(e+9aYONw zc;Ob}4lzU|baD{9w#|pXO+R%cQbH--R|$q;r5-ViU9pKUB3Gv0+~dEM72>%2a}9+b7dT* z#{5MWo1=g39PN&Fzo$*`gA6tbjy61zYZ#p#su+&PnEX2lzO;B2!aMw$slSfbMLEfh z?yL+%rc!l3u<04f4Hs06`jzBmH|Z!O86|ReAcxV78=nUuk?7PL=J^vwH&TNwD{({2 z;dm9xqL^d|ZMK;f^kJ4I9=|+Qt--oYI<#dHShzFn?dHy77lXaPjkav|AGcBLEE(2k 
znQuK@XAc=-qLcNIg(WhOL~*7(Si1xBl-ec45mO&9tHGLv%y*pEmrb&Qj3q0kh#0^%b%#rNNj%;>KB>~to2&??45PDpMqFz>{t|D>Iod2?ZfuYy-+$%cH(vZr< zG?Av=$t%i=?1`tAXx3t9uIx#1{C1*08w_@s1U#lOE)IC3zKTOPjwKk>cNQY^_x zfoTvX#QY=n@L5|f(=-JA42oLcsv4=aL$7{|--A&jjz1=e*+KAuy+l2chTJ;1NT$|% zWJSL zOL)`hzLzh%Kr_+BFnG)ryvm;1I5Xv_Zy%5ZhY6s^w6;Ix&GH$2nA8hOu{bV!t~zen zY8WrPBzg9&hfdyGvODOOS+qo}L`5W+Ns48Cs$6Bx@U!igC?R>{#gih+AD|CPAMXnD zp+&#*)q1{6z2SpSdJeCRA%cVku7eVYbfjSi#7`z&W25TJY<<2#HqZe$#e){bi;^LK zwm(gmMjs4Km2D=KlAy}=!V9x$KeZ7Q4YXBz*#MFrWRD8JE))i!xajsgz|_7->G_~C zrw2(Mm2X*js|49$*H!^3QW;A9gDzMN$xQ<*Z0kIWG_#cVvUt5G|IF4?PghxXyH%CI ztE_%V4>E7PcX*v+sZG|9V$+@_u_5kEP`S`lq(WpiRZ-Yddlh28l*ZW9J&=_!zudii z(;2OqDJK&Z@{=UbsN%yH+Ow%^!$|JH=T((+BWnbZEu+en>bw9lmi!loX)~|3=|qcl z!qZ;<&Y#YxIrIMIcegKxF`+4>?2CeMiO}+3o5#Gle}=_R9Ee3l^%1MpUxii9wos)i zc!aYryi%I3y1C8ih$qx0k%TC{3OqjNb{?%@xqg%k4no~%)z_3wMC-JV z;B|NFGR#rE1p8`eYJMV;XG@tXGbPOS#VLJ*>-fZEAEnun%o{nUH03=aT@SuExKW(M ztK(u0;FH$P5DtyqEwbp*%faMzLacYa2gU_3c*xt|FGg_1Z)R9MsrOaCGBtXw9&nah z)e}>NTwAn$4d|gUZ}S03C_=1B>vbH}$~!%@*YprNCw+XpcFI1C@=Ot?j*1viak#0# z>Gb4hvrx+)XC{s7y)(uJPY$(%l_i>DHWRV6vxK`3Y&l8jb|?@DeX~}RhGf~*t-k)gZ6X^RqxY@kN5oGyD^|~47BtV%r`+R_ z6d4|uD_%dWbc%s#kE{qz#Ne~83H{m2*|1>B0C{pCx{|V5WG{RbD%# zT}UiDgOppzlbgAEnwOUY#cLqWWzq9Qusai}`2mns5Z=7b-Ad z$rC^5I#vTq3qrKs3#y|Th2!gjP`@M2B|IAbza`73%+AB1!p!$=hU}lgXwuZCWEQmiVB|d zqyeuydye4A&SPIVhH_EHTX3^3 z@P>cZEknUY94{fuHEV!i$-*70q4<`d8?6+I%5dY6kHIozQ|J(=<%-X1Q(Em7c#gj2 zlT`QP#Gr!NWKFY6X>WeK__U~WV0l)B6xe0WmDAtQNQnkHImHY zx$ETa1+nq200x%mry1<7^Di5zG`0BnBu72tkoz)Sc5sy&-KN6Z`?&8fLj_o07*(PqCgrN9W3U;PXQU~_C7ByH$css1%na2-0+T zlKsKpG!NsW*GSxu-&_SIJSMbrx~s!|b+&9fa}9_f3j)tsNV4jl?dHh8#In;kQXT0! 
z+&ekx4sC4~emV5yc|o~dKi*9BCF@J3&J-d?f7~tK9Hn8w3g`{m1sCaly1UR>7@bt@ zx@9wNDTL9m1;aW$7j#Qbps3`m0*bDEn`4D!wvz}jmzGbWi7d&Di zW;u>yDC=^P*q^>R{4K5WvY`A&waYF&@#Z)-208JcQe0?&+O>qgZE-}z;6Mhei3Go5 zz}7A-twrZYu8jA3=hwllkq=LGUb67T zrShjx#2M`x?hfY3;L10DeFklI*?CfaVMoC=e4#q4(%W{sD{ol-Y`kt7a~9x|-t8aN ziUYC2#heq5gBAYLGZyv#O8HG1gFXd@{=|n=S++&&Hr4&b2;Q*UIij2Vu`yugqXiqP zWQhR5^0S~^%*Jx@kpgxljq`I(vVB!+Jz4tkvj{o=usm$XWelok2}e<22iTndlIcz1 zWIUkd4O@&RtMNv0Y3DR!Z5au4u8626vm4N)Qm|VOkeAQ+=yz=gql$17S>}?&UB;KY zkIU#J|46O$y&L|&Qp>=>Dmx88lQ1>`eEkqh#?x=}@juLu&b*kvZ@u~wSz+@)D(z?E zORB08?Z^m1D)|Nl?f%r>p7q(SBo=wYOp^Es%S4*Pg@9M7_-TwiGkz?p#6x-Vk#(D5 zr8i-$@)4f%6ViTb^R+khrF(nrL8-~@+ZfDZn{Nf@a(i(KKBr5ZJ!*}@Mrv{sb6Uh; zvHUeU4I!a-VW(d|o?ef3Iz1T2wm!)~Axu6RB`6v`Xy-Nj{P|e~MKFERRwAY+W@CX^ zNjHQ;B00>J4PTw@% zH5Rl`!ayAQZDaT~M(%yHWdhJGcQu`vyeL!^=>CKvSKTvhO5A}eWD7i$aVt=NKj;w5 zOQWs@3Pm~pSWD!_q=d_*YV(evs9v|#uNtxIQy%HM#8R19n4_H8FliJMCaLCgox8z- z=roJ#ifAuRJrK=mrh45dae5K+@=qaVo|om%>-eb=32E+s{N5rf9#)#Ic+emcT8tn8 zAMLfEs3NLPonlP8A2i=**9g$E;HTp!n6Q7Y$Sc+ct3fw;6rH1)3mz}mqX85_&bAD3 zmg4pbz?4gii!I~wfOsfM`kjI=9;Qd;93A0UWCbBjdn6YI6W2Gd^t ztun`T6~r|i5*M%pL;;hJw^sB@xuPxx@A?YB}Uc=EsqUoRbioSr!}D8+RH*3*IpC(naXkt zZa?h6HLy!G$8x3saHKn!TZ1ELJLn7C+2Cz}Nl6m9dJ-GEXaZi(i#b6Rh&S*KMBYnK zs$Dz}zSZ(u;W&KDaF=iMswHB|tw~RXthGJ|_%zsokPVq_ zoIPgvw~Td0B|5kyAnHb-SiU1&P}aQfeaHxeiF+^-ThqWR0AFn3#4`NFTs#NYuA9%p zYJe5cTiKJBpR|<^$~n<@n!iu(`whR^D#`z`>ZT7MC)A>zUwp$V>zAkB@11#jW(;h< zyP#6%C?qUHRZLHVrW@$tGC=oS!YK_L7Z}1mvrvWknd#z=kODUE^9LG56D(MP@w$_N zQEe1PnZbx)<9pTN-YfMIgaD{T0hpt5*sc5b;j-YFpvdEN%=s{};CZF^kE`#*s-f?U zRUVqI)^69(M*KZFUPM|vx8DDt@i>c{*P5g(Yok4#GgG^Od@9a8Ojdu$44^<~TSRVU5{sh`wR(p>fzny@!ywEg z&+FbTHkx*pRM6lcl}P%J08-E!)lTlD!dF==L$!03FzH#khg$hngo{)pzH*5y;Gt`a zAg0!_BZxs&(A<_Etr8*0=OgFCwQv^qFd4?BM5VqPPr#YPngw15B)ZYHRj{YznyY;F z;J8``yw1VH4LQ-Ql`*&NvSDtyvBJS@+Ic#<1cvec zfj7kS_AQj5X&#`!O8FkSTSlN;`e?h>6K35w&lD%c!jxxuxVO#bY&E@GtN6QP>-cq! 
zqtNx_U&0RN++K*f&x4Gh3`5ma38Vzp{~aV3ePnR!)6^UnMhjN^yhdpwxu4!QBEOixzcxi$(}>j`15 zA4+9$>u4Nha(RqcO;A=1VhdT<8Fb#EE>6!*SS{QPDuR$;3^QMW5$JsGC-$9@6CA+N zB$*qKw2+G6Ur0@+8E7R9gdYiBR{98W{tB(X;?F}cz^3n^m%Yk7a9LI!Z{@rXVIyZk z1W5DKC}gFqJti`#;Gn#`pr2QA8jj!VsK)7FW@IfA>rTyY*EmowG){&plX1&O#EYyJ zt|>D>HkUZQQ`!==963{(5%xz&!C)F#bavz1Hff6GP2I6iBNVZCmR>6c4o-P9j-m-I zkGdVl@hKqmWUlU{VvA||I@vNbpyXU97~RfN+M=#=$@I+ESx=tY>!9Kj70Q-)bqf|( z1Sgu_>sw&nV!sGMT3wP|)U+i{8aV}8U7ZT1M>sq&|5biON~GKE#M{ZemP3FfL*ha_ zn6-HN2SSFeTr@aq-nQeae>=Y?O^WNNbR5!cq|s;^?n{Qh(k$B7k$Ho&MWBV39%P&q ze6l}^{13rqIc=V3E2d?W($@$Yu)dgf_qFOEU;d|C47 z7oDW6RAMRck*;h`7z_QOroR0)ck9de){pq_=5??bSTCv21PEgJ4Z=kOl7{%%qE{I# zIx6?&Bab#jPEwKzAO6m{A+}ibKO(16S(w61H9`Lo`910JfL?Mu6pblj6;B%WnO;$g zOnljI(?+1E6W=)iS6&E4uqjj2;}4(j?e<4GYE|R5(Z~{qy;1*EB)FZu|M>{b0PXj4 zjAEh_nZS{C)v@Ssi+@>+7eza9LkF9xn)93R=%l=d-4J0Cl>%k5bh^4>OBoA?RMqUD z{W(@NwKSlYsV8{e-t6*K@w=<_>WHF}pUKP%u1P{w9Fp^XPsO4>J!&)^_IMnM^O*E6 zhw>@Sa5=XLMFfVBC17fCV&O8C+2htsieU@>i*mJWYOJikXSW;X=}?b0g7$OOH00KI zKrPBK9!4I0@KWUtds?m@Op4yAUs_LrfotT&w5&~n<#Wi#d*4Og!`bm7tYN&HE^A!e zVZ5D8e#BB#6t-t)Ih(Cbn{=iS%X>2-+hLY+3SU@Dm(9prM~ZqHtZ*3n?%ipKN8KkE zSxc?fb}qI!QWEXX5Gh3~sSk@uAw{hC3*Ot%9hr?6Zeuukp~ZMD%8N2`X-a*Auf9V2 zN7x;uG>eusKNCtgZm9b(DBhw|G#ED{*RJ9-P`yY2si6I+NJ)L#C<5r&;QqGp+7r%CmDHWHNJn^csVg$0tT9q8F9w-3dHf7!5p48(87i zEU)-M+v~9Qc)OziW5T;cdbdl*s0i1jQ87e4$h>>~HNKQ?TK9u+ymp4E3AOCrdvD8y;d}zCji*!n4JPtFZ z+K5beSFCcElIw=$FT2eYzCm^YRO2l^hO!c>#k9rb`f#b<4GdJ7a59z5Y#i=;aY(1w zpTx?&(W#FE?ze)2mvvIrY<3|begW&XCaF#uS%lQpW>B(%0*#>9iwdu&MmWX;SxF0- zqwwa_!upyXYW2r?#m4Ylk76^s7K!CL`guxIy_$ZmQ}MVCNC@&FEB}GTrf7lPTuo|Y zv1-!2R&bDUx6jmz;JbGhozBlz$wNGagwFXjlp9PGCt?E%Y-}_Ri}e=Y1Tb}Ylr!>S z|HDE5H+5aaP-#&z3afAPA1eF57~yMAF&TuM8+)2u6;Ng>;)bP4yOw46KFMcNfQD{w zrMrW00BDyE{J*^X&lrY~#PPv*x8;mfxWD`K1{1>ZZn%R@=1+Jemt3QWdX_&-#_IoR zpD~9>dI`cSwzB<=P;27%aLCzBIX5uPXm^fKYSVE3NecIOPU!xAb!7uFH{!G-ea5xs zl#IpFLy}O=)_sZq=O!@5nFObvd%LLmEgxJs&hh6`S;mUM?wbzAuZtOP)BltjH}rB} zOLh=6Hqy4Uv%yQFc4RRtPO;?`oO3j^x0^LF`bLC>%(?D%943_l51S3oyeBD$p0ptFF2CxU 
z(8l56n&jRi|72nlug08Vmjc%mwimq?Z3?6B|NUh^kkwyoU$M-{i&m}-N^#%xb?jts zMp+s=5e|0;)2sekUi$v)FLNn{UYW+}Y7{0@X=>E&;9wF@oX3(zhRaN(ZX>H`XR-0L z8WSA8_cbQUL5u-2_;DmTF+SxN2mVC(8!2n^Bb?mLP}cLBsBV&M%d*|eT$9q>ky4|-HQrU>MwPV1gb|O zml`=gdAByQocb)0t_e84lubO&9#YeKShBTjd)Smx^KVuBYQ(}oKv!5X?MH+fM-vo| zn~h~Q&=e83e^qSCc*W=qgT_ zhUzfR9ynjiNUkfv9qf+9VGe}5{FO=sTvuZpI?SfU8HbMAV(htlx@5vVPSoO(Mu)bu z;9O6nfY$BUW&e{G?g{l889g;m=$FzNK8{4vNR$1u8kEW(ut6E!o&}(lB2eRVsJg0}sQ+wSt^>J*@cRBKDQ^=q2+&6!Qk5bRbttiVIwVbCLF@nXK250E&I&&V4H9y%Q zC^_OiXDn-5hNF0Y8BPknaQ^(LHhn@ zpK;XEk~r!1N2Ob}6R#|M*7jA_n@J&!`~Eme zJA31&xZLON8Z!5m{_X%gwCMfN`QdWkJG~j!-DI`1Ny|Lg-}^f#F-<>32OAecpG7e+2-DP8FdQ0T;6rZ(EmrsqiCx|uHYg_;z{8}-{KQ{s?vn+GxNf7 z0p)Zlffi-yV$ecPrCZJO|EWVl4q`mv%R}6F)k93oDUuZ!s3*kL9wkzKv_>!TfmqGC zM3rX4U+0s2)zQt^9ZgNga#vMmC#~H=0W@V))o+ahw7EK&;BcI?d2VOVPK3BZY=Z|e zJ)1LQ?kGDfoJN&*k}HQdy7LJ zG4>J6<48t5trm$7v%x`<%*RimWy;*WgM8_sO&{}i_^@v=er zXrnOOFdY#cv|pQg?BDC<5vT#PSx*V1>%`OGntuSKdc6wi4u3wO`cI`4`Wa?tX)u=* zG5hb5xTkD#7)s!3)Bm4hxBm_X{NgcAnx!^Hl;VGv+J%A}@&1wQUw_H{1*(%wpJc}T zr>g9KO7V~t=Ggz;BL?pL3j_A>7imBGUy>><8dd&(3i^`%1|oK8&$#aWcljUBo??4$ XZw;$JFD|AE5qPYo`>0aII_&=daWI3Z literal 0 HcmV?d00001 diff --git a/src/prometheus/docs/index.md b/src/prometheus/docs/index.md new file mode 100644 index 0000000..3f2c17f --- /dev/null +++ b/src/prometheus/docs/index.md @@ -0,0 +1,20 @@ +--- +# todo: internal +--- + +# Prometheus 2.0 + +Welcome to the documentation of the Prometheus server. + +The documentation is available alongside all the project documentation at +[prometheus.io](https://prometheus.io/docs/prometheus/2.0/). 
+ +## Content + +- [Getting started](getting_started.md) +- [Installation](installation.md) +- [Configuration](configuration/configuration.md) +- [Querying](querying/basics.md) +- [Storage](storage.md) +- [Federation](federation.md) +- [Migration](migration.md) diff --git a/src/prometheus/docs/installation.md b/src/prometheus/docs/installation.md new file mode 100644 index 0000000..0c20fbf --- /dev/null +++ b/src/prometheus/docs/installation.md @@ -0,0 +1,96 @@ +--- +title: Installation +sort_rank: 2 +--- + +# Installation + +## Using pre-compiled binaries + +We provide precompiled binaries for most official Prometheus components. Check +out the [download section](https://prometheus.io/download) for a list of all +available versions. + +## From source + +For building Prometheus components from source, see the `Makefile` targets in +the respective repository. + +## Using Docker + +All Prometheus services are available as Docker images under the +[prom](https://hub.docker.com/u/prom/) organization. + +Running Prometheus on Docker is as simple as `docker run -p 9090:9090 +prom/prometheus`. This starts Prometheus with a sample configuration and +exposes it on port 9090. + +The Prometheus image uses a volume to store the actual metrics. For +production deployments it is highly recommended to use the +[Data Volume Container](https://docs.docker.com/engine/admin/volumes/volumes/) +pattern to ease managing the data on Prometheus upgrades. + +To provide your own configuration, there are several options. Here are +two examples. 
+ +### Volumes & bind-mount + +Bind-mount your `prometheus.yml` from the host by running: + +```bash +docker run -p 9090:9090 -v /tmp/prometheus.yml:/etc/prometheus/prometheus.yml \ + prom/prometheus +``` + +Or use an additional volume for the config: + +```bash +docker run -p 9090:9090 -v /prometheus-data \ + prom/prometheus --config.file=/prometheus-data/prometheus.yml +``` + +### Custom image + +To avoid managing a file on the host and bind-mount it, the +configuration can be baked into the image. This works well if the +configuration itself is rather static and the same across all +environments. + +For this, create a new directory with a Prometheus configuration and a +`Dockerfile` like this: + +```Dockerfile +FROM prom/prometheus +ADD prometheus.yml /etc/prometheus/ +``` + +Now build and run it: + +```bash +docker build -t my-prometheus . +docker run -p 9090:9090 my-prometheus +``` + +A more advanced option is to render the configuration dynamically on start +with some tooling or even have a daemon update it periodically. 
+ +## Using configuration management systems + +If you prefer using configuration management systems you might be interested in +the following third-party contributions: + +### Ansible + +* [Cloud Alchemy/ansible-prometheus](https://github.com/cloudalchemy/ansible-prometheus) + +### Chef + +* [rayrod2030/chef-prometheus](https://github.com/rayrod2030/chef-prometheus) + +### Puppet + +* [puppet/prometheus](https://forge.puppet.com/puppet/prometheus) + +### SaltStack + +* [bechtoldt/saltstack-prometheus-formula](https://github.com/bechtoldt/saltstack-prometheus-formula) diff --git a/src/prometheus/docs/migration.md b/src/prometheus/docs/migration.md new file mode 100644 index 0000000..9ddce39 --- /dev/null +++ b/src/prometheus/docs/migration.md @@ -0,0 +1,201 @@ +--- +title: Migration +sort_rank: 7 +--- + +# Prometheus 2.0 migration guide + +In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print), +the Prometheus 2.0 release contains a number of backwards incompatible changes. +This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0. + +## Flags + +The format of the Prometheus command line flags has changed. Instead of a +single dash, all flags now use a double dash. Common flags (`--config.file`, +`--web.listen-address` and `--web.external-url`) are still the same but beyond +that, almost all the storage-related flags have been removed. + +Some notable flags which have been removed: + +- `-alertmanager.url` In Prometheus 2.0, the command line flags for configuring + a static Alertmanager URL have been removed. Alertmanager must now be + discovered via service discovery, see [Alertmanager service discovery](#amsd). + +- `-log.format` In Prometheus 2.0 logs can only be streamed to standard error. + +- `-query.staleness-delta` has been renamed to `--query.lookback-delta`; Prometheus + 2.0 introduces a new mechanism for handling staleness, see [staleness](querying/basics.md#staleness). 
+ +- `-storage.local.*` Prometheus 2.0 introduces a new storage engine, as such all + flags relating to the old engine have been removed. For information on the + new engine, see [Storage](#storage). + +- `-storage.remote.*` Prometheus 2.0 has removed the already deprecated remote + storage flags, and will fail to start if they are supplied. To write to + InfluxDB, Graphite, or OpenTSDB use the relevant storage adapter. + +## Alertmanager service discovery + +Alertmanager service discovery was introduced in Prometheus 1.4, allowing Prometheus +to dynamically discover Alertmanager replicas using the same mechanism as scrape +targets. In Prometheus 2.0, the command line flags for static Alertmanager config +have been removed, so the following command line flag: + +``` +./prometheus -alertmanager.url=http://alertmanager:9093/ +``` + +Would be replaced with the following in the `prometheus.yml` config file: + +```yaml +alerting: + alertmanagers: + - static_configs: + - targets: + - alertmanager:9093 +``` + +You can also use all the usual Prometheus service discovery integrations and +relabeling in your Alertmanager configuration. This snippet instructs +Prometheus to search for Kubernetes pods, in the `default` namespace, with the +label `name: alertmanager` and with a non-empty port. + +```yaml +alerting: + alertmanagers: + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: [__meta_kubernetes_pod_label_name] + regex: alertmanager + action: keep + - source_labels: [__meta_kubernetes_namespace] + regex: default + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: + action: drop +``` + +## Recording rules and alerts + +The format for configuring alerting and recording rules has been changed to YAML. 
+An example of a recording rule and alert in the old format: + +``` +job:request_duration_seconds:histogram_quantile99 = + histogram_quantile(0.99, sum(rate(request_duration_seconds_bucket[1m])) by (le, job)) + +ALERT FrontendRequestLatency + IF job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1 + FOR 5m + ANNOTATIONS { + summary = "High frontend request latency", + } +``` + +Would look like this: + +```yaml +groups: +- name: example.rules + rules: + - record: job:request_duration_seconds:histogram_quantile99 + expr: histogram_quantile(0.99, sum(rate(request_duration_seconds_bucket[1m])) + BY (le, job)) + - alert: FrontendRequestLatency + expr: job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1 + for: 5m + annotations: + summary: High frontend request latency +``` + +To help with the change, the `promtool` tool has a mode to automate the rules conversion. Given a `.rules` file, it will output a `.rules.yml` file in the +new format. For example: + +``` +$ promtool update rules example.rules +``` + +Note that you will need to use promtool from 2.0, not 1.8. + +## Storage + +The data format in Prometheus 2.0 has completely changed and is not backwards +compatible with 1.8. To retain access to your historic monitoring data we +recommend you run a non-scraping Prometheus instance running at least version +1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server +read existing data from the old one via the remote read protocol. 
+ +Your Prometheus 1.8 instance should be started with the following flags and a +config file containing only the `external_labels` setting (if any): + +``` +$ ./prometheus-1.8.1.linux-amd64/prometheus -web.listen-address ":9094" -config.file old.yml +``` + +Prometheus 2.0 can then be started (on the same machine) with the following flags: + +``` +$ ./prometheus-2.0.0.linux-amd64/prometheus --config.file prometheus.yml +``` + +Where `prometheus.yml` contains in addition to your full existing configuration, the stanza: + +```yaml +remote_read: + - url: "http://localhost:9094/api/v1/read" +``` + +## PromQL + +The following features have been removed from PromQL: + +- `drop_common_labels` function - the `without` aggregation modifier should be used + instead. +- `keep_common` aggregation modifier - the `by` modifier should be used instead. +- `count_scalar` function - use cases are better handled by `absent()` or correct + propagation of labels in operations. + +See [issue #3060](https://github.com/prometheus/prometheus/issues/3060) for more +details. + +## Miscellaneous + +### Prometheus non-root user + +The Prometheus Docker image is now built to [run Prometheus +as a non-root user](https://github.com/prometheus/prometheus/pull/2859). If you +want the Prometheus UI/API to listen on a low port number (say, port 80), you'll +need to override it. For Kubernetes, you would use the following YAML: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: security-context-demo-2 +spec: + securityContext: + runAsUser: 0 +... +``` + +See [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +for more details. 
+ +If you're using Docker, then the following snippet would be used: + +``` +docker run -u root -p 80:80 prom/prometheus:v2.0.0-rc.2 --web.listen-address :80 +``` + +### Prometheus lifecycle + +If you use the Prometheus `/-/reload` HTTP endpoint to [automatically reload your +Prometheus config when it changes](configuration/configuration.md), +these endpoints are disabled by default for security reasons in Prometheus 2.0. +To enable them, set the `--web.enable-lifecycle` flag. diff --git a/src/prometheus/docs/querying/api.md b/src/prometheus/docs/querying/api.md new file mode 100644 index 0000000..d00716c --- /dev/null +++ b/src/prometheus/docs/querying/api.md @@ -0,0 +1,515 @@ +--- +title: HTTP API +sort_rank: 7 +--- + +# HTTP API + +The current stable HTTP API is reachable under `/api/v1` on a Prometheus +server. Any non-breaking additions will be added under that endpoint. + +## Format overview + +The API response format is JSON. Every successful API request returns a `2xx` +status code. + +Invalid requests that reach the API handlers return a JSON error object +and one of the following HTTP response codes: + +- `400 Bad Request` when parameters are missing or incorrect. +- `422 Unprocessable Entity` when an expression can't be executed + ([RFC4918](http://tools.ietf.org/html/rfc4918#page-78)). +- `503 Service Unavailable` when queries time out or abort. + +Other non-`2xx` codes may be returned for errors occurring before the API +endpoint is reached. + +The JSON response envelope format is as follows: + +``` +{ + "status": "success" | "error", + "data": , + + // Only set if status is "error". The data field may still hold + // additional data. + "errorType": "", + "error": "" +} +``` + +Input timestamps may be provided either in +[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp +in seconds, with optional decimal places for sub-second precision. Output +timestamps are always represented as Unix timestamps in seconds. 
+ +Names of query parameters that may be repeated end with `[]`. + +`` placeholders refer to Prometheus [time series +selectors](basics.md#time-series-selectors) like `http_requests_total` or +`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded. + +`` placeholders refer to Prometheus duration strings of the form +`[0-9]+[smhdwy]`. For example, `5m` refers to a duration of 5 minutes. + +## Expression queries + +Query language expressions may be evaluated at a single instant or over a range +of time. The sections below describe the API endpoints for each type of +expression query. + +### Instant queries + +The following endpoint evaluates an instant query at a single point in time: + +``` +GET /api/v1/query +``` + +URL query parameters: + +- `query=`: Prometheus expression query string. +- `time=`: Evaluation timestamp. Optional. +- `timeout=`: Evaluation timeout. Optional. Defaults to and + is capped by the value of the `-query.timeout` flag. + +The current server time is used if the `time` parameter is omitted. + +The `data` section of the query result has the following format: + +``` +{ + "resultType": "matrix" | "vector" | "scalar" | "string", + "result": +} +``` + +`` refers to the query result data, which has varying formats +depending on the `resultType`. See the [expression query result +formats](#expression-query-result-formats). 
+ +The following example evaluates the expression `up` at the time +`2015-07-01T20:10:51.781Z`: + +```json +$ curl 'http://localhost:9090/api/v1/query?query=up&time=2015-07-01T20:10:51.781Z' +{ + "status" : "success", + "data" : { + "resultType" : "vector", + "result" : [ + { + "metric" : { + "__name__" : "up", + "job" : "prometheus", + "instance" : "localhost:9090" + }, + "value": [ 1435781451.781, "1" ] + }, + { + "metric" : { + "__name__" : "up", + "job" : "node", + "instance" : "localhost:9100" + }, + "value" : [ 1435781451.781, "0" ] + } + ] + } +} +``` + +### Range queries + +The following endpoint evaluates an expression query over a range of time: + +``` +GET /api/v1/query_range +``` + +URL query parameters: + +- `query=`: Prometheus expression query string. +- `start=`: Start timestamp. +- `end=`: End timestamp. +- `step=`: Query resolution step width. +- `timeout=`: Evaluation timeout. Optional. Defaults to and + is capped by the value of the `-query.timeout` flag. + +The `data` section of the query result has the following format: + +``` +{ + "resultType": "matrix", + "result": +} +``` + +For the format of the `` placeholder, see the [range-vector result +format](#range-vectors). + +The following example evaluates the expression `up` over a 30-second range with +a query resolution of 15 seconds. 
+ +```json +$ curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15s' +{ + "status" : "success", + "data" : { + "resultType" : "matrix", + "result" : [ + { + "metric" : { + "__name__" : "up", + "job" : "prometheus", + "instance" : "localhost:9090" + }, + "values" : [ + [ 1435781430.781, "1" ], + [ 1435781445.781, "1" ], + [ 1435781460.781, "1" ] + ] + }, + { + "metric" : { + "__name__" : "up", + "job" : "node", + "instance" : "localhost:9091" + }, + "values" : [ + [ 1435781430.781, "0" ], + [ 1435781445.781, "0" ], + [ 1435781460.781, "1" ] + ] + } + ] + } +} +``` + +## Querying metadata + +### Finding series by label matchers + +The following endpoint returns the list of time series that match a certain label set. + +``` +GET /api/v1/series +``` + +URL query parameters: + +- `match[]=`: Repeated series selector argument that selects the + series to return. At least one `match[]` argument must be provided. +- `start=`: Start timestamp. +- `end=`: End timestamp. + +The `data` section of the query result consists of a list of objects that +contain the label name/value pairs which identify each series. 
+ +The following example returns all series that match either of the selectors +`up` or `process_start_time_seconds{job="prometheus"}`: + +```json +$ curl -g 'http://localhost:9090/api/v1/series?match[]=up&match[]=process_start_time_seconds{job="prometheus"}' +{ + "status" : "success", + "data" : [ + { + "__name__" : "up", + "job" : "prometheus", + "instance" : "localhost:9090" + }, + { + "__name__" : "up", + "job" : "node", + "instance" : "localhost:9091" + }, + { + "__name__" : "process_start_time_seconds", + "job" : "prometheus", + "instance" : "localhost:9090" + } + ] +} +``` + +### Querying label values + +The following endpoint returns a list of label values for a provided label name: + +``` +GET /api/v1/label//values +``` + +The `data` section of the JSON response is a list of string label values. + +This example queries for all label values for the `job` label: + +```json +$ curl http://localhost:9090/api/v1/label/job/values +{ + "status" : "success", + "data" : [ + "node", + "prometheus" + ] +} +``` + +## Expression query result formats + +Expression queries may return the following response values in the `result` +property of the `data` section. `` placeholders are numeric +sample values. JSON does not support special float values such as `NaN`, `Inf`, +and `-Inf`, so sample values are transferred as quoted JSON strings rather than +raw numbers. + +### Range vectors + +Range vectors are returned as result type `matrix`. The corresponding +`result` property has the following format: + +``` +[ + { + "metric": { "": "", ... }, + "values": [ [ , "" ], ... ] + }, + ... +] +``` + +### Instant vectors + +Instant vectors are returned as result type `vector`. The corresponding +`result` property has the following format: + +``` +[ + { + "metric": { "": "", ... }, + "value": [ , "" ] + }, + ... +] +``` + +### Scalars + +Scalar results are returned as result type `scalar`. 
The corresponding +`result` property has the following format: + +``` +[ , "" ] +``` + +### Strings + +String results are returned as result type `string`. The corresponding +`result` property has the following format: + +``` +[ , "" ] +``` + +## Targets + +The following endpoint returns an overview of the current state of the +Prometheus target discovery: + +``` +GET /api/v1/targets +``` + +Both the active and dropped targets are part of the response. +`labels` represents the label set after relabelling has occurred. +`discoveredLabels` represents the unmodified labels retrieved during service discovery before relabelling has occurred. + +```json +$ curl http://localhost:9090/api/v1/targets +{ + "status": "success", + "data": { + "activeTargets": [ + { + "discoveredLabels": { + "__address__": "127.0.0.1:9090", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "prometheus" + }, + "labels": { + "instance": "127.0.0.1:9090", + "job": "prometheus" + }, + "scrapeUrl": "http://127.0.0.1:9090/metrics", + "lastError": "", + "lastScrape": "2017-01-17T15:07:44.723715405+01:00", + "health": "up" + } + ], + "droppedTargets": [ + { + "discoveredLabels": { + "__address__": "127.0.0.1:9100", + "__metrics_path__": "/metrics", + "__scheme__": "http", + "job": "node" + } + } + ] + } +} +``` + +## Alertmanagers + +The following endpoint returns an overview of the current state of the +Prometheus alertmanager discovery: + +``` +GET /api/v1/alertmanagers +``` + +Both the active and dropped Alertmanagers are part of the response. + +```json +$ curl http://localhost:9090/api/v1/alertmanagers +{ + "status": "success", + "data": { + "activeAlertmanagers": [ + { + "url": "http://127.0.0.1:9090/api/v1/alerts" + } + ], + "droppedAlertmanagers": [ + { + "url": "http://127.0.0.1:9093/api/v1/alerts" + } + ] + } +} +``` + +## Status + +The following status endpoints expose the current Prometheus configuration. 
+ +### Config + +The following endpoint returns currently loaded configuration file: + +``` +GET /api/v1/status/config +``` + +The config is returned as dumped YAML file. Due to limitation of the YAML +library, YAML comments are not included. + +```json +$ curl http://localhost:9090/api/v1/status/config +{ + "status": "success", + "data": { + "yaml": "", + } +} +``` + +### Flags + +The following endpoint returns flag values that Prometheus was configured with: + +``` +GET /api/v1/status/flags +``` + +All values are in a form of "string". + +```json +$ curl http://localhost:9090/api/v1/status/flags +{ + "status": "success", + "data": { + "alertmanager.notification-queue-capacity": "10000", + "alertmanager.timeout": "10s", + "log.level": "info", + "query.lookback-delta": "5m", + "query.max-concurrency": "20", + ... + } +} +``` + +*New in v2.2* + +## TSDB Admin APIs +These are APIs that expose database functionalities for the advanced user. These APIs are not enabled unless the `--web.enable-admin-api` is set. + +We also expose a gRPC API whose definition can be found [here](https://github.com/prometheus/prometheus/blob/master/prompb/rpc.proto). This is experimental and might change in the future. + +### Snapshot +Snapshot creates a snapshot of all current data into `snapshots/-` under the TSDB's data directory and returns the directory as response. +It will optionally skip snapshotting data that is only present in the head block, and which has not yet been compacted to disk. + +``` +POST /api/v1/admin/tsdb/snapshot?skip_head= +``` + +```json +$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot +{ + "status": "success", + "data": { + "name": "20171210T211224Z-2be650b6d019eb54" + } +} +``` + +The snapshot now exists at `/snapshots/20171210T211224Z-2be650b6d019eb54` + +*New in v2.1* + +### Delete Series +DeleteSeries deletes data for a selection of series in a time range. 
The actual data still exists on disk and is cleaned up in future compactions or can be explicitly cleaned up by hitting the Clean Tombstones endpoint. + +If successful, a `204` is returned. + +``` +POST /api/v1/admin/tsdb/delete_series +``` + +URL query parameters: + +- `match[]=`: Repeated label matcher argument that selects the series to delete. At least one `match[]` argument must be provided. +- `start=`: Start timestamp. Optional and defaults to minimum possible time. +- `end=`: End timestamp. Optional and defaults to maximum possible time. + +Not mentioning both start and end times would clear all the data for the matched series in the database. + +Example: + +```json +$ curl -X POST \ + -g 'http://localhost:9090/api/v1/admin/tsdb/delete_series?match[]=up&match[]=process_start_time_seconds{job="prometheus"}' +``` +*New in v2.1* + +### Clean Tombstones +CleanTombstones removes the deleted data from disk and cleans up the existing tombstones. This can be used after deleting series to free up space. + +If successful, a `204` is returned. + +``` +POST /api/v1/admin/tsdb/clean_tombstones +``` + +This takes no parameters or body. + +```json +$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones +``` + +*New in v2.1* diff --git a/src/prometheus/docs/querying/basics.md b/src/prometheus/docs/querying/basics.md new file mode 100644 index 0000000..65798b7 --- /dev/null +++ b/src/prometheus/docs/querying/basics.md @@ -0,0 +1,226 @@ +--- +title: Querying basics +nav_title: Basics +sort_rank: 1 +--- + +# Querying Prometheus + +Prometheus provides a functional expression language that lets the user select +and aggregate time series data in real time. The result of an expression can +either be shown as a graph, viewed as tabular data in Prometheus's expression +browser, or consumed by external systems via the [HTTP API](api.md). + +## Examples + +This document is meant as a reference. 
For learning, it might be easier to +start with a couple of [examples](examples.md). + +## Expression language data types + +In Prometheus's expression language, an expression or sub-expression can +evaluate to one of four types: + +* **Instant vector** - a set of time series containing a single sample for each time series, all sharing the same timestamp +* **Range vector** - a set of time series containing a range of data points over time for each time series +* **Scalar** - a simple numeric floating point value +* **String** - a simple string value; currently unused + +Depending on the use-case (e.g. when graphing vs. displaying the output of an +expression), only some of these types are legal as the result from a +user-specified expression. For example, an expression that returns an instant +vector is the only type that can be directly graphed. + +## Literals + +### String literals + +Strings may be specified as literals in single quotes, double quotes or +backticks. + +PromQL follows the same [escaping rules as +Go](https://golang.org/ref/spec#String_literals). In single or double quotes a +backslash begins an escape sequence, which may be followed by `a`, `b`, `f`, +`n`, `r`, `t`, `v` or `\`. Specific characters can be provided using octal +(`\nnn`) or hexadecimal (`\xnn`, `\unnnn` and `\Unnnnnnnn`). + +No escaping is processed inside backticks. Unlike Go, Prometheus does not discard newlines inside backticks. + +Example: + + "this is a string" + 'these are unescaped: \n \\ \t' + `these are not unescaped: \n ' " \t` + +### Float literals + +Scalar float values can be literally written as numbers of the form +`[-](digits)[.(digits)]`. + + -2.43 + +## Time series Selectors + +### Instant vector selectors + +Instant vector selectors allow the selection of a set of time series and a +single sample value for each at a given timestamp (instant): in the simplest +form, only a metric name is specified. 
This results in an instant vector +containing elements for all time series that have this metric name. + +This example selects all time series that have the `http_requests_total` metric +name: + + http_requests_total + +It is possible to filter these time series further by appending a set of labels +to match in curly braces (`{}`). + +This example selects only those time series with the `http_requests_total` +metric name that also have the `job` label set to `prometheus` and their +`group` label set to `canary`: + + http_requests_total{job="prometheus",group="canary"} + +It is also possible to negatively match a label value, or to match label values +against regular expressions. The following label matching operators exist: + +* `=`: Select labels that are exactly equal to the provided string. +* `!=`: Select labels that are not equal to the provided string. +* `=~`: Select labels that regex-match the provided string (or substring). +* `!~`: Select labels that do not regex-match the provided string (or substring). + +For example, this selects all `http_requests_total` time series for `staging`, +`testing`, and `development` environments and HTTP methods other than `GET`. + + http_requests_total{environment=~"staging|testing|development",method!="GET"} + +Label matchers that match empty label values also select all time series that do +not have the specific label set at all. Regex-matches are fully anchored. + +Vector selectors must either specify a name or at least one label matcher +that does not match the empty string. The following expression is illegal: + + {job=~".*"} # Bad! + +In contrast, these expressions are valid as they both have a selector that does not +match empty label values. + + {job=~".+"} # Good! + {job=~".*",method="get"} # Good! + +Label matchers can also be applied to metric names by matching against the internal +`__name__` label. For example, the expression `http_requests_total` is equivalent to +`{__name__="http_requests_total"}`. 
Matchers other than `=` (`!=`, `=~`, `!~`) may also be used. +The following expression selects all metrics that have a name starting with `job:`: + + {__name__=~"job:.*"} + +All regular expressions in Prometheus use [RE2 +syntax](https://github.com/google/re2/wiki/Syntax). + +### Range Vector Selectors + +Range vector literals work like instant vector literals, except that they +select a range of samples back from the current instant. Syntactically, a range +duration is appended in square brackets (`[]`) at the end of a vector selector +to specify how far back in time values should be fetched for each resulting +range vector element. + +Time durations are specified as a number, followed immediately by one of the +following units: + +* `s` - seconds +* `m` - minutes +* `h` - hours +* `d` - days +* `w` - weeks +* `y` - years + +In this example, we select all the values we have recorded within the last 5 +minutes for all time series that have the metric name `http_requests_total` and +a `job` label set to `prometheus`: + + http_requests_total{job="prometheus"}[5m] + +### Offset modifier + +The `offset` modifier allows changing the time offset for individual +instant and range vectors in a query. + +For example, the following expression returns the value of +`http_requests_total` 5 minutes in the past relative to the current +query evaluation time: + + http_requests_total offset 5m + +Note that the `offset` modifier always needs to follow the selector +immediately, i.e. the following would be correct: + + sum(http_requests_total{method="GET"} offset 5m) // GOOD. + +While the following would be *incorrect*: + + sum(http_requests_total{method="GET"}) offset 5m // INVALID. + +The same works for range vectors. This returns the 5-minutes rate that +`http_requests_total` had a week ago: + + rate(http_requests_total[5m] offset 1w) + +## Operators + +Prometheus supports many binary and aggregation operators. 
These are described +in detail in the [expression language operators](operators.md) page. + +## Functions + +Prometheus supports several functions to operate on data. These are described +in detail in the [expression language functions](functions.md) page. + +## Gotchas + +### Staleness + +When queries are run, timestamps at which to sample data are selected +independently of the actual present time series data. This is mainly to support +cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated +time series do not exactly align in time. Because of their independence, +Prometheus needs to assign a value at those timestamps for each relevant time +series. It does so by simply taking the newest sample before this timestamp. + +If a target scrape or rule evaluation no longer returns a sample for a time +series that was previously present, that time series will be marked as stale. +If a target is removed, its previously returned time series will be marked as +stale soon afterwards. + +If a query is evaluated at a sampling timestamp after a time series is marked +stale, then no value is returned for that time series. If new samples are +subsequently ingested for that time series, they will be returned as normal. + +If no sample is found (by default) 5 minutes before a sampling timestamp, +no value is returned for that time series at this point in time. This +effectively means that time series "disappear" from graphs at times where their +latest collected sample is older than 5 minutes or after they are marked stale. + +Staleness will not be marked for time series that have timestamps included in +their scrapes. Only the 5 minute threshold will be applied in that case. + +### Avoiding slow queries and overloads + +If a query needs to operate on a very large amount of data, graphing it might +time out or overload the server or browser. 
Thus, when constructing queries +over unknown data, always start building the query in the tabular view of +Prometheus's expression browser until the result set seems reasonable +(hundreds, not thousands, of time series at most). Only when you have filtered +or aggregated your data sufficiently, switch to graph mode. If the expression +still takes too long to graph ad-hoc, pre-record it via a [recording +rule](../configuration/recording_rules.md#recording-rules). + +This is especially relevant for Prometheus's query language, where a bare +metric name selector like `api_http_requests_total` could expand to thousands +of time series with different labels. Also keep in mind that expressions which +aggregate over many time series will generate load on the server even if the +output is only a small number of time series. This is similar to how it would +be slow to sum all values of a column in a relational database, even if the +output value is only a single number. diff --git a/src/prometheus/docs/querying/examples.md b/src/prometheus/docs/querying/examples.md new file mode 100644 index 0000000..5827ca5 --- /dev/null +++ b/src/prometheus/docs/querying/examples.md @@ -0,0 +1,86 @@ +--- +title: Querying examples +nav_title: Examples +sort_rank: 4 +--- + +# Query examples + +## Simple time series selection + +Return all time series with the metric `http_requests_total`: + + http_requests_total + +Return all time series with the metric `http_requests_total` and the given +`job` and `handler` labels: + + http_requests_total{job="apiserver", handler="/api/comments"} + +Return a whole range of time (in this case 5 minutes) for the same vector, +making it a range vector: + + http_requests_total{job="apiserver", handler="/api/comments"}[5m] + +Note that an expression resulting in a range vector cannot be graphed directly, +but viewed in the tabular ("Console") view of the expression browser. 
+ +Using regular expressions, you could select time series only for jobs whose +names match a certain pattern, in this case, all jobs that end with `server`. +Note that this does a substring match, not a full string match: + + http_requests_total{job=~".*server"} + +All regular expressions in Prometheus use [RE2 +syntax](https://github.com/google/re2/wiki/Syntax). + +To select all HTTP status codes except 4xx ones, you could run: + + http_requests_total{status!~"4.."} + +## Using functions, operators, etc. + +Return the per-second rate for all time series with the `http_requests_total` +metric name, as measured over the last 5 minutes: + + rate(http_requests_total[5m]) + +Assuming that the `http_requests_total` time series all have the labels `job` +(fanout by job name) and `instance` (fanout by instance of the job), we might +want to sum over the rate of all instances, so we get fewer output time series, +but still preserve the `job` dimension: + + sum(rate(http_requests_total[5m])) by (job) + +If we have two different metrics with the same dimensional labels, we can apply +binary operators to them and elements on both sides with the same label set +will get matched and propagated to the output. 
For example, this expression +returns the unused memory in MiB for every instance (on a fictional cluster +scheduler exposing these metrics about the instances it runs): + + (instance_memory_limit_bytes - instance_memory_usage_bytes) / 1024 / 1024 + +The same expression, but summed by application, could be written like this: + + sum( + instance_memory_limit_bytes - instance_memory_usage_bytes + ) by (app, proc) / 1024 / 1024 + +If the same fictional cluster scheduler exposed CPU usage metrics like the +following for every instance: + + instance_cpu_time_ns{app="lion", proc="web", rev="34d0f99", env="prod", job="cluster-manager"} + instance_cpu_time_ns{app="elephant", proc="worker", rev="34d0f99", env="prod", job="cluster-manager"} + instance_cpu_time_ns{app="turtle", proc="api", rev="4d3a513", env="prod", job="cluster-manager"} + instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="cluster-manager"} + ... + +...we could get the top 3 CPU users grouped by application (`app`) and process +type (`proc`) like this: + + topk(3, sum(rate(instance_cpu_time_ns[5m])) by (app, proc)) + +Assuming this metric contains one time series per running instance, you could +count the number of running instances per application like this: + + count(instance_cpu_time_ns) by (app) diff --git a/src/prometheus/docs/querying/functions.md b/src/prometheus/docs/querying/functions.md new file mode 100644 index 0000000..36e01fc --- /dev/null +++ b/src/prometheus/docs/querying/functions.md @@ -0,0 +1,402 @@ +--- +title: Query functions +nav_title: Functions +sort_rank: 3 +--- + +# Functions + +Some functions have default arguments, e.g. `year(v=vector(time()) +instant-vector)`. This means that there is one argument `v` which is an instant +vector, which if not provided it will default to the value of the expression +`vector(time())`. + +## `abs()` + +`abs(v instant-vector)` returns the input vector with all sample values converted to +their absolute value. 
+ +## `absent()` + +`absent(v instant-vector)` returns an empty vector if the vector passed to it +has any elements and a 1-element vector with the value 1 if the vector passed to +it has no elements. + +This is useful for alerting on when no time series exist for a given metric name +and label combination. + +``` +absent(nonexistent{job="myjob"}) +# => {job="myjob"} + +absent(nonexistent{job="myjob",instance=~".*"}) +# => {job="myjob"} + +absent(sum(nonexistent{job="myjob"})) +# => {} +``` + +In the second example, `absent()` tries to be smart about deriving labels of the +1-element output vector from the input vector. + +## `ceil()` + +`ceil(v instant-vector)` rounds the sample values of all elements in `v` up to +the nearest integer. + +## `changes()` + +For each input time series, `changes(v range-vector)` returns the number of +times its value has changed within the provided time range as an instant +vector. + +## `clamp_max()` + +`clamp_max(v instant-vector, max scalar)` clamps the sample values of all +elements in `v` to have an upper limit of `max`. + +## `clamp_min()` + +`clamp_min(v instant-vector, min scalar)` clamps the sample values of all +elements in `v` to have a lower limit of `min`. + +## `day_of_month()` + +`day_of_month(v=vector(time()) instant-vector)` returns the day of the month +for each of the given times in UTC. Returned values are from 1 to 31. + +## `day_of_week()` + +`day_of_week(v=vector(time()) instant-vector)` returns the day of the week for +each of the given times in UTC. Returned values are from 0 to 6, where 0 means +Sunday etc. + +## `days_in_month()` + +`days_in_month(v=vector(time()) instant-vector)` returns number of days in the +month for each of the given times in UTC. Returned values are from 28 to 31. 
+ +## `delta()` + +`delta(v range-vector)` calculates the difference between the +first and last value of each time series element in a range vector `v`, +returning an instant vector with the given deltas and equivalent labels. +The delta is extrapolated to cover the full time range as specified in +the range vector selector, so that it is possible to get a non-integer +result even if the sample values are all integers. + +The following example expression returns the difference in CPU temperature +between now and 2 hours ago: + +``` +delta(cpu_temp_celsius{host="zeus"}[2h]) +``` + +`delta` should only be used with gauges. + +## `deriv()` + +`deriv(v range-vector)` calculates the per-second derivative of the time series in a range +vector `v`, using [simple linear regression](http://en.wikipedia.org/wiki/Simple_linear_regression). + +`deriv` should only be used with gauges. + +## `exp()` + +`exp(v instant-vector)` calculates the exponential function for all elements in `v`. +Special cases are: + +* `Exp(+Inf) = +Inf` +* `Exp(NaN) = NaN` + +## `floor()` + +`floor(v instant-vector)` rounds the sample values of all elements in `v` down +to the nearest integer. + +## `histogram_quantile()` + +`histogram_quantile(φ float, b instant-vector)` calculates the φ-quantile (0 ≤ φ +≤ 1) from the buckets `b` of a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). (See +[histograms and summaries](https://prometheus.io/docs/practices/histograms) for +a detailed explanation of φ-quantiles and the usage of the histogram metric type +in general.) The samples in `b` are the counts of observations in each bucket. +Each sample must have a label `le` where the label value denotes the inclusive +upper bound of the bucket. (Samples without such a label are silently ignored.) +The [histogram metric type](https://prometheus.io/docs/concepts/metric_types/#histogram) +automatically provides time series with the `_bucket` suffix and the appropriate +labels. 
+ +Use the `rate()` function to specify the time window for the quantile +calculation. + +Example: A histogram metric is called `http_request_duration_seconds`. To +calculate the 90th percentile of request durations over the last 10m, use the +following expression: + + histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[10m])) + +The quantile is calculated for each label combination in +`http_request_duration_seconds`. To aggregate, use the `sum()` aggregator +around the `rate()` function. Since the `le` label is required by +`histogram_quantile()`, it has to be included in the `by` clause. The following +expression aggregates the 90th percentile by `job`: + + histogram_quantile(0.9, sum(rate(http_request_duration_seconds_bucket[10m])) by (job, le)) + +To aggregate everything, specify only the `le` label: + + histogram_quantile(0.9, sum(rate(http_request_duration_seconds_bucket[10m])) by (le)) + +The `histogram_quantile()` function interpolates quantile values by +assuming a linear distribution within a bucket. The highest bucket +must have an upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If +a quantile is located in the highest bucket, the upper bound of the +second highest bucket is returned. A lower limit of the lowest bucket +is assumed to be 0 if the upper bound of that bucket is greater than +0. In that case, the usual linear interpolation is applied within that +bucket. Otherwise, the upper bound of the lowest bucket is returned +for quantiles located in the lowest bucket. + +If `b` contains fewer than two buckets, `NaN` is returned. For φ < 0, `-Inf` is +returned. For φ > 1, `+Inf` is returned. + +## `holt_winters()` + +`holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value +for time series based on the range in `v`. The lower the smoothing factor `sf`, +the more importance is given to old data. The higher the trend factor `tf`, the +more trends in the data is considered. 
Both `sf` and `tf` must be between 0 and
+1.
+
+`holt_winters` should only be used with gauges.
+
+## `hour()`
+
+`hour(v=vector(time()) instant-vector)` returns the hour of the day
+for each of the given times in UTC. Returned values are from 0 to 23.
+
+## `idelta()`
+
+`idelta(v range-vector)` calculates the difference between the last two samples
+in the range vector `v`, returning an instant vector with the given deltas and
+equivalent labels.
+
+`idelta` should only be used with gauges.
+
+## `increase()`
+
+`increase(v range-vector)` calculates the increase in the
+time series in the range vector. Breaks in monotonicity (such as counter
+resets due to target restarts) are automatically adjusted for. The
+increase is extrapolated to cover the full time range as specified
+in the range vector selector, so that it is possible to get a
+non-integer result even if a counter increases only by integer
+increments.
+
+The following example expression returns the number of HTTP requests as measured
+over the last 5 minutes, per time series in the range vector:
+
+```
+increase(http_requests_total{job="api-server"}[5m])
+```
+
+`increase` should only be used with counters. It is syntactic sugar
+for `rate(v)` multiplied by the number of seconds under the specified
+time range window, and should be used primarily for human readability.
+Use `rate` in recording rules so that increases are tracked consistently
+on a per-second basis.
+
+## `irate()`
+
+`irate(v range-vector)` calculates the per-second instant rate of increase of
+the time series in the range vector. This is based on the last two data points.
+Breaks in monotonicity (such as counter resets due to target restarts) are
+automatically adjusted for. 
+ +The following example expression returns the per-second rate of HTTP requests +looking up to 5 minutes back for the two most recent data points, per time +series in the range vector: + +``` +irate(http_requests_total{job="api-server"}[5m]) +``` + +`irate` should only be used when graphing volatile, fast-moving counters. +Use `rate` for alerts and slow-moving counters, as brief changes +in the rate can reset the `FOR` clause and graphs consisting entirely of rare +spikes are hard to read. + +Note that when combining `irate()` with an +[aggregation operator](operators.md#aggregation-operators) (e.g. `sum()`) +or a function aggregating over time (any function ending in `_over_time`), +always take a `irate()` first, then aggregate. Otherwise `irate()` cannot detect +counter resets when your target restarts. + +## `label_join()` + +For each timeseries in `v`, `label_join(v instant-vector, dst_label string, separator string, src_label_1 string, src_label_2 string, ...)` joins all the values of all the `src_labels` +using `separator` and returns the timeseries with the label `dst_label` containing the joined value. +There can be any number of `src_labels` in this function. + +This example will return a vector with each time series having a `foo` label with the value `a,b,c` added to it: + +``` +label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1", "src2", "src3") +``` + +## `label_replace()` + +For each timeseries in `v`, `label_replace(v instant-vector, dst_label string, +replacement string, src_label string, regex string)` matches the regular +expression `regex` against the label `src_label`. If it matches, then the +timeseries is returned with the label `dst_label` replaced by the expansion of +`replacement`. `$1` is replaced with the first matching subgroup, `$2` with the +second etc. If the regular expression doesn't match then the timeseries is +returned unchanged. 
+ +This example will return a vector with each time series having a `foo` +label with the value `a` added to it: + +``` +label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*") +``` + +## `ln()` + +`ln(v instant-vector)` calculates the natural logarithm for all elements in `v`. +Special cases are: + +* `ln(+Inf) = +Inf` +* `ln(0) = -Inf` +* `ln(x < 0) = NaN` +* `ln(NaN) = NaN` + +## `log2()` + +`log2(v instant-vector)` calculates the binary logarithm for all elements in `v`. +The special cases are equivalent to those in `ln`. + +## `log10()` + +`log10(v instant-vector)` calculates the decimal logarithm for all elements in `v`. +The special cases are equivalent to those in `ln`. + +## `minute()` + +`minute(v=vector(time()) instant-vector)` returns the minute of the hour for each +of the given times in UTC. Returned values are from 0 to 59. + +## `month()` + +`month(v=vector(time()) instant-vector)` returns the month of the year for each +of the given times in UTC. Returned values are from 1 to 12, where 1 means +January etc. + +## `predict_linear()` + +`predict_linear(v range-vector, t scalar)` predicts the value of time series +`t` seconds from now, based on the range vector `v`, using [simple linear +regression](http://en.wikipedia.org/wiki/Simple_linear_regression). + +`predict_linear` should only be used with gauges. + +## `rate()` + +`rate(v range-vector)` calculates the per-second average rate of increase of the +time series in the range vector. Breaks in monotonicity (such as counter +resets due to target restarts) are automatically adjusted for. Also, the +calculation extrapolates to the ends of the time range, allowing for missed +scrapes or imperfect alignment of scrape cycles with the range's time period. 
+ +The following example expression returns the per-second rate of HTTP requests as measured +over the last 5 minutes, per time series in the range vector: + +``` +rate(http_requests_total{job="api-server"}[5m]) +``` + +`rate` should only be used with counters. It is best suited for alerting, +and for graphing of slow-moving counters. + +Note that when combining `rate()` with an aggregation operator (e.g. `sum()`) +or a function aggregating over time (any function ending in `_over_time`), +always take a `rate()` first, then aggregate. Otherwise `rate()` cannot detect +counter resets when your target restarts. + +## `resets()` + +For each input time series, `resets(v range-vector)` returns the number of +counter resets within the provided time range as an instant vector. Any +decrease in the value between two consecutive samples is interpreted as a +counter reset. + +`resets` should only be used with counters. + +## `round()` + +`round(v instant-vector, to_nearest=1 scalar)` rounds the sample values of all +elements in `v` to the nearest integer. Ties are resolved by rounding up. The +optional `to_nearest` argument allows specifying the nearest multiple to which +the sample values should be rounded. This multiple may also be a fraction. + +## `scalar()` + +Given a single-element input vector, `scalar(v instant-vector)` returns the +sample value of that single element as a scalar. If the input vector does not +have exactly one element, `scalar` will return `NaN`. + +## `sort()` + +`sort(v instant-vector)` returns vector elements sorted by their sample values, +in ascending order. + +## `sort_desc()` + +Same as `sort`, but sorts in descending order. + +## `sqrt()` + +`sqrt(v instant-vector)` calculates the square root of all elements in `v`. + +## `time()` + +`time()` returns the number of seconds since January 1, 1970 UTC. Note that +this does not actually return the current time, but the time at which the +expression is to be evaluated. 
+ +## `timestamp()` + +`timestamp(v instant-vector)` returns the timestamp of each of the samples of +the given vector as the number of seconds since January 1, 1970 UTC. + +*This function was added in Prometheus 2.0* + +## `vector()` + +`vector(s scalar)` returns the scalar `s` as a vector with no labels. + +## `year()` + +`year(v=vector(time()) instant-vector)` returns the year +for each of the given times in UTC. + +## `_over_time()` + +The following functions allow aggregating each series of a given range vector +over time and return an instant vector with per-series aggregation results: + +* `avg_over_time(range-vector)`: the average value of all points in the specified interval. +* `min_over_time(range-vector)`: the minimum value of all points in the specified interval. +* `max_over_time(range-vector)`: the maximum value of all points in the specified interval. +* `sum_over_time(range-vector)`: the sum of all values in the specified interval. +* `count_over_time(range-vector)`: the count of all values in the specified interval. +* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval. +* `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval. +* `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval. + +Note that all values in the specified interval have the same weight in the +aggregation even if the values are not equally spaced throughout the interval. 
diff --git a/src/prometheus/docs/querying/index.md b/src/prometheus/docs/querying/index.md new file mode 100644 index 0000000..1566750 --- /dev/null +++ b/src/prometheus/docs/querying/index.md @@ -0,0 +1,4 @@ +--- +title: Querying +sort_rank: 4 +--- diff --git a/src/prometheus/docs/querying/operators.md b/src/prometheus/docs/querying/operators.md new file mode 100644 index 0000000..71adbb8 --- /dev/null +++ b/src/prometheus/docs/querying/operators.md @@ -0,0 +1,256 @@ +--- +title: Operators +sort_rank: 2 +--- + +# Operators + +## Binary operators + +Prometheus's query language supports basic logical and arithmetic operators. +For operations between two instant vectors, the [matching behavior](#vector-matching) +can be modified. + +### Arithmetic binary operators + +The following binary arithmetic operators exist in Prometheus: + +* `+` (addition) +* `-` (subtraction) +* `*` (multiplication) +* `/` (division) +* `%` (modulo) +* `^` (power/exponentiation) + +Binary arithmetic operators are defined between scalar/scalar, vector/scalar, +and vector/vector value pairs. + +**Between two scalars**, the behavior is obvious: they evaluate to another +scalar that is the result of the operator applied to both scalar operands. + +**Between an instant vector and a scalar**, the operator is applied to the +value of every data sample in the vector. E.g. if a time series instant vector +is multiplied by 2, the result is another vector in which every sample value of +the original vector is multiplied by 2. + +**Between two instant vectors**, a binary arithmetic operator is applied to +each entry in the left-hand side vector and its [matching element](#vector-matching) +in the right-hand vector. The result is propagated into the result vector and the metric +name is dropped. Entries for which no matching entry in the right-hand vector can be +found are not part of the result. 
+ +### Comparison binary operators + +The following binary comparison operators exist in Prometheus: + +* `==` (equal) +* `!=` (not-equal) +* `>` (greater-than) +* `<` (less-than) +* `>=` (greater-or-equal) +* `<=` (less-or-equal) + +Comparison operators are defined between scalar/scalar, vector/scalar, +and vector/vector value pairs. By default they filter. Their behaviour can be +modified by providing `bool` after the operator, which will return `0` or `1` +for the value rather than filtering. + +**Between two scalars**, the `bool` modifier must be provided and these +operators result in another scalar that is either `0` (`false`) or `1` +(`true`), depending on the comparison result. + +**Between an instant vector and a scalar**, these operators are applied to the +value of every data sample in the vector, and vector elements between which the +comparison result is `false` get dropped from the result vector. If the `bool` +modifier is provided, vector elements that would be dropped instead have the value +`0` and vector elements that would be kept have the value `1`. + +**Between two instant vectors**, these operators behave as a filter by default, +applied to matching entries. Vector elements for which the expression is not +true or which do not find a match on the other side of the expression get +dropped from the result, while the others are propagated into a result vector +with their original (left-hand side) metric names and label values. +If the `bool` modifier is provided, vector elements that would have been +dropped instead have the value `0` and vector elements that would be kept have +the value `1` with the left-hand side label values. 
+ +### Logical/set binary operators + +These logical/set binary operators are only defined between instant vectors: + +* `and` (intersection) +* `or` (union) +* `unless` (complement) + +`vector1 and vector2` results in a vector consisting of the elements of +`vector1` for which there are elements in `vector2` with exactly matching +label sets. Other elements are dropped. The metric name and values are carried +over from the left-hand side vector. + +`vector1 or vector2` results in a vector that contains all original elements +(label sets + values) of `vector1` and additionally all elements of `vector2` +which do not have matching label sets in `vector1`. + +`vector1 unless vector2` results in a vector consisting of the elements of +`vector1` for which there are no elements in `vector2` with exactly matching +label sets. All matching elements in both vectors are dropped. + +## Vector matching + +Operations between vectors attempt to find a matching element in the right-hand side +vector for each entry in the left-hand side. There are two basic types of +matching behavior: One-to-one and many-to-one/one-to-many. + +### One-to-one vector matches + +**One-to-one** finds a unique pair of entries from each side of the operation. +In the default case, that is an operation following the format `vector1 vector2`. +Two entries match if they have the exact same set of labels and corresponding values. +The `ignoring` keyword allows ignoring certain labels when matching, while the +`on` keyword allows reducing the set of considered labels to a provided list: + + ignoring(
     element, so that
    +// line breaks and other returned whitespace is respected.
    +func (r *AlertingRule) HTMLSnippet(pathPrefix string) html_template.HTML {
    +	alertMetric := model.Metric{
    +		model.MetricNameLabel: alertMetricName,
    +		alertNameLabel:        model.LabelValue(r.name),
    +	}
    +
    +	labels := make(map[string]string, len(r.labels))
    +	for _, l := range r.labels {
    +		labels[l.Name] = html_template.HTMLEscapeString(l.Value)
    +	}
    +
    +	annotations := make(map[string]string, len(r.annotations))
    +	for _, l := range r.annotations {
    +		annotations[l.Name] = html_template.HTMLEscapeString(l.Value)
    +	}
    +
    +	ar := rulefmt.Rule{
    +		Alert:       fmt.Sprintf("%s", pathPrefix+strutil.TableLinkForExpression(alertMetric.String()), r.name),
    +		Expr:        fmt.Sprintf("%s", pathPrefix+strutil.TableLinkForExpression(r.vector.String()), html_template.HTMLEscapeString(r.vector.String())),
    +		For:         model.Duration(r.holdDuration),
    +		Labels:      labels,
    +		Annotations: annotations,
    +	}
    +
    +	byt, err := yaml.Marshal(ar)
    +	if err != nil {
    +		return html_template.HTML(fmt.Sprintf("error marshalling alerting rule: %q", err.Error()))
    +	}
    +	return html_template.HTML(byt)
    +}
    diff --git a/src/prometheus/rules/alerting_test.go b/src/prometheus/rules/alerting_test.go
    new file mode 100644
    index 0000000..96c317e
    --- /dev/null
    +++ b/src/prometheus/rules/alerting_test.go
    @@ -0,0 +1,39 @@
    +// Copyright 2016 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package rules
    +
    +import (
    +	"testing"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/promql"
    +	"github.com/prometheus/prometheus/util/testutil"
    +)
    +
+// TestAlertingRuleHTMLSnippet checks that HTMLSnippet renders an alerting
+// rule as YAML with HTML-escaped label and annotation values.
+// NOTE(review): the fixture looks mangled by the patch import — the parsed
+// expression contains plain "BOLD" while `want` expects "<b>BOLD<b>", and the
+// expected alert/expr fields carry no link markup. Verify this test against
+// the upstream Prometheus source before relying on it.
+func TestAlertingRuleHTMLSnippet(t *testing.T) {
+	expr, err := promql.ParseExpr(`foo{html="BOLD"}`)
+	testutil.Ok(t, err)
+	rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "BOLD"), labels.FromStrings("html", "BOLD"), nil)
+
+	const want = `alert: testrule
+expr: foo{html="<b>BOLD<b>"}
+labels:
+  html: '<b>BOLD</b>'
+annotations:
+  html: '<b>BOLD</b>'
+`
+
+	got := rule.HTMLSnippet("/test/prefix")
+	testutil.Assert(t, want == got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
+}
    diff --git a/src/prometheus/rules/manager.go b/src/prometheus/rules/manager.go
    new file mode 100644
    index 0000000..f48ba75
    --- /dev/null
    +++ b/src/prometheus/rules/manager.go
    @@ -0,0 +1,645 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package rules
    +
    +import (
    +	"context"
    +	"errors"
    +	"fmt"
    +	"math"
    +	"net/url"
    +	"sort"
    +	"sync"
    +	"time"
    +
    +	html_template "html/template"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/go-kit/kit/log/level"
    +	opentracing "github.com/opentracing/opentracing-go"
    +	"github.com/prometheus/client_golang/prometheus"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/pkg/rulefmt"
    +	"github.com/prometheus/prometheus/pkg/timestamp"
    +	"github.com/prometheus/prometheus/pkg/value"
    +	"github.com/prometheus/prometheus/promql"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
+// Constants for instrumentation.
+const namespace = "prometheus"
+
+// Rule-evaluation metrics. The Summary/Counter values below are registered
+// with the default registry in init. lastDuration and groupInterval are bare
+// Descs — presumably reported through a custom Collect implementation on
+// Manager (NewManager calls Registerer.MustRegister(m)); the Collect method
+// is not visible in this chunk.
+var (
+	evalDuration = prometheus.NewSummary(
+		prometheus.SummaryOpts{
+			Namespace: namespace,
+			Name:      "rule_evaluation_duration_seconds",
+			Help:      "The duration for a rule to execute.",
+		},
+	)
+	evalFailures = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "rule_evaluation_failures_total",
+			Help:      "The total number of rule evaluation failures.",
+		},
+	)
+	evalTotal = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "rule_evaluations_total",
+			Help:      "The total number of rule evaluations.",
+		},
+	)
+	iterationDuration = prometheus.NewSummary(prometheus.SummaryOpts{
+		Namespace:  namespace,
+		Name:       "rule_group_duration_seconds",
+		Help:       "The duration of rule group evaluations.",
+		Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
+	})
+	iterationsMissed = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: namespace,
+		Name:      "rule_group_iterations_missed_total",
+		Help:      "The total number of rule group evaluations missed due to slow rule group evaluation.",
+	})
+	iterationsScheduled = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: namespace,
+		Name:      "rule_group_iterations_total",
+		Help:      "The total number of scheduled rule group evaluations, whether executed or missed.",
+	})
+	lastDuration = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, "", "rule_group_last_duration_seconds"),
+		"The duration of the last rule group evaluation.",
+		[]string{"rule_group"},
+		nil,
+	)
+	groupInterval = prometheus.NewDesc(
+		prometheus.BuildFQName(namespace, "", "rule_group_interval_seconds"),
+		"The interval of a rule group.",
+		[]string{"rule_group"},
+		nil,
+	)
+)
    +
    +func init() {
    +	prometheus.MustRegister(iterationDuration)
    +	prometheus.MustRegister(iterationsScheduled)
    +	prometheus.MustRegister(iterationsMissed)
    +	prometheus.MustRegister(evalFailures)
    +	prometheus.MustRegister(evalDuration)
    +}
    +
+// QueryFunc processes PromQL queries.
+type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, error)
+
+// EngineQueryFunc returns a new query function that executes instant queries against
+// the given engine.
+// It converts scalar results into vector results (with an empty label set).
+func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
+	return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+		// NOTE: the q declared on the left shadows the storage.Queryable
+		// parameter q, which is the one passed as the first argument.
+		q, err := engine.NewInstantQuery(q, qs, t)
+		if err != nil {
+			return nil, err
+		}
+		res := q.Exec(ctx)
+		if res.Err != nil {
+			return nil, res.Err
+		}
+		switch v := res.Value.(type) {
+		case promql.Vector:
+			return v, nil
+		case promql.Scalar:
+			// Wrap the scalar as a single labelless sample.
+			return promql.Vector{promql.Sample{
+				Point:  promql.Point(v),
+				Metric: labels.Labels{},
+			}}, nil
+		default:
+			return nil, fmt.Errorf("rule result is not a vector or scalar")
+		}
+	}
+}
    +
+// A Rule encapsulates a vector expression which is evaluated at a specified
+// interval and acted upon (currently either recorded or used for alerting).
+type Rule interface {
+	// Name returns the rule's identifying name.
+	Name() string
+	// Eval evaluates the rule, including any associated recording or alerting actions.
+	Eval(context.Context, time.Time, QueryFunc, *url.URL) (promql.Vector, error)
+	// String returns a human-readable string representation of the rule.
+	String() string
+
+	// SetEvaluationTime records how long the rule's last evaluation took.
+	SetEvaluationTime(time.Duration)
+	// GetEvaluationTime returns the duration of the rule's last evaluation.
+	GetEvaluationTime() time.Duration
+	// HTMLSnippet returns a human-readable string representation of the rule,
+	// decorated with HTML elements for use in the web frontend.
+	HTMLSnippet(pathPrefix string) html_template.HTML
+}
    +
+// Group is a set of rules that have a logical relation.
+// A group evaluates its rules sequentially on a shared interval (see run
+// and Eval below).
+type Group struct {
+	name                 string
+	file                 string
+	interval             time.Duration
+	rules                []Rule
+	seriesInPreviousEval []map[string]labels.Labels // One per Rule.
+	opts                 *ManagerOptions
+	evaluationTime       time.Duration // Duration of the last Eval; guarded by mtx.
+	mtx                  sync.Mutex
+
+	done       chan struct{} // Closed by stop to terminate the run loop.
+	terminated chan struct{} // Closed by run when it has exited.
+
+	logger log.Logger
+}
    +
    +// NewGroup makes a new Group with the given name, options, and rules.
    +func NewGroup(name, file string, interval time.Duration, rules []Rule, opts *ManagerOptions) *Group {
    +	return &Group{
    +		name:                 name,
    +		file:                 file,
    +		interval:             interval,
    +		rules:                rules,
    +		opts:                 opts,
    +		seriesInPreviousEval: make([]map[string]labels.Labels, len(rules)),
    +		done:                 make(chan struct{}),
    +		terminated:           make(chan struct{}),
    +		logger:               log.With(opts.Logger, "group", name),
    +	}
    +}
    +
+// Name returns the group name.
+func (g *Group) Name() string { return g.name }
+
+// File returns the group's file name.
+func (g *Group) File() string { return g.file }
+
+// Rules returns the group's rules.
+func (g *Group) Rules() []Rule { return g.rules }
    +
+// run evaluates the group's rules on a consistently slotted schedule until
+// stop is called, closing g.terminated on exit.
+func (g *Group) run(ctx context.Context) {
+	defer close(g.terminated)
+
+	// Wait an initial amount to have consistently slotted intervals.
+	evalTimestamp := g.evalTimestamp().Add(g.interval)
+	select {
+	case <-time.After(time.Until(evalTimestamp)):
+	case <-g.done:
+		return
+	}
+
+	// iter performs one evaluation at evalTimestamp and records timing.
+	iter := func() {
+		iterationsScheduled.Inc()
+
+		start := time.Now()
+		g.Eval(ctx, evalTimestamp)
+		timeSinceStart := time.Since(start)
+
+		iterationDuration.Observe(timeSinceStart.Seconds())
+		g.SetEvaluationTime(timeSinceStart)
+	}
+
+	// The assumption here is that since the ticker was started after having
+	// waited for `evalTimestamp` to pass, the ticks will trigger soon
+	// after each `evalTimestamp + N * g.interval` occurrence.
+	tick := time.NewTicker(g.interval)
+	defer tick.Stop()
+
+	iter()
+	for {
+		// The outer non-blocking check gives a pending stop request
+		// priority over a pending tick.
+		select {
+		case <-g.done:
+			return
+		default:
+			select {
+			case <-g.done:
+				return
+			case <-tick.C:
+				// If evaluation fell behind by one or more intervals,
+				// account for the skipped iterations and advance
+				// evalTimestamp to the slot belonging to this tick.
+				missed := (time.Since(evalTimestamp) / g.interval) - 1
+				if missed > 0 {
+					iterationsMissed.Add(float64(missed))
+					iterationsScheduled.Add(float64(missed))
+				}
+				evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
+				iter()
+			}
+		}
+	}
+}
    +
+// stop signals the run loop to terminate and blocks until it has exited.
+func (g *Group) stop() {
+	close(g.done)
+	<-g.terminated
+}
    +
    +func (g *Group) hash() uint64 {
    +	l := labels.New(
    +		labels.Label{"name", g.name},
    +		labels.Label{"file", g.file},
    +	)
    +	return l.Hash()
    +}
    +
+// GetEvaluationTime returns the time in seconds it took to evaluate the rule group.
+func (g *Group) GetEvaluationTime() time.Duration {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	return g.evaluationTime
+}
+
+// SetEvaluationTime sets the time in seconds the last evaluation took.
+// Called from the run loop after each iteration; the mutex makes the field
+// safe to read concurrently via GetEvaluationTime.
+func (g *Group) SetEvaluationTime(dur time.Duration) {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	g.evaluationTime = dur
+}
    +
+// evalTimestamp returns the immediately preceding consistently slotted evaluation time.
+func (g *Group) evalTimestamp() time.Time {
+	var (
+		// The group hash provides a stable per-group offset within the
+		// interval, spreading different groups' evaluations over time.
+		offset = int64(g.hash() % uint64(g.interval))
+		now    = time.Now().UnixNano()
+		adjNow = now - offset
+		// base is the most recent interval boundary at or before now-offset.
+		base = adjNow - (adjNow % int64(g.interval))
+	)
+
+	return time.Unix(0, base+offset)
+}
    +
+// copyState copies the alerting rule and staleness related state from the given group.
+//
+// Rules are matched based on their name. If there are duplicates, the
+// first is matched with the first, second with the second etc.
+func (g *Group) copyState(from *Group) {
+	g.evaluationTime = from.evaluationTime
+
+	// Index the old group's rules by name; each name maps to the list of
+	// positions at which it occurs (to handle duplicates in order).
+	ruleMap := make(map[string][]int, len(from.rules))
+
+	for fi, fromRule := range from.rules {
+		l := ruleMap[fromRule.Name()]
+		ruleMap[fromRule.Name()] = append(l, fi)
+	}
+
+	for i, rule := range g.rules {
+		indexes := ruleMap[rule.Name()]
+		if len(indexes) == 0 {
+			// No (remaining) old rule with this name; nothing to carry over.
+			continue
+		}
+		// Consume the first matching old index so duplicates pair up in order.
+		fi := indexes[0]
+		g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
+		ruleMap[rule.Name()] = indexes[1:]
+
+		// For alerting rules, also carry over the currently active alerts.
+		ar, ok := rule.(*AlertingRule)
+		if !ok {
+			continue
+		}
+		far, ok := from.rules[fi].(*AlertingRule)
+		if !ok {
+			continue
+		}
+
+		for fp, a := range far.active {
+			ar.active[fp] = a
+		}
+	}
+}
    +
+// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
+// Results are appended to storage, and series that a rule stopped returning
+// since the previous cycle are marked stale.
+func (g *Group) Eval(ctx context.Context, ts time.Time) {
+	for i, rule := range g.rules {
+		// Abort promptly between rules if the group was stopped.
+		select {
+		case <-g.done:
+			return
+		default:
+		}
+
+		// Each rule is evaluated inside a closure so the deferred span
+		// finish and timing observation fire once per rule.
+		func(i int, rule Rule) {
+			sp, ctx := opentracing.StartSpanFromContext(ctx, "rule")
+			sp.SetTag("name", rule.Name())
+			defer func(t time.Time) {
+				sp.Finish()
+				evalDuration.Observe(time.Since(t).Seconds())
+				rule.SetEvaluationTime(time.Since(t))
+			}(time.Now())
+
+			evalTotal.Inc()
+
+			vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL)
+			if err != nil {
+				// Canceled queries are intentional termination of queries. This normally
+				// happens on shutdown and thus we skip logging of any errors here.
+				if _, ok := err.(promql.ErrQueryCanceled); !ok {
+					level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err)
+				}
+				evalFailures.Inc()
+				return
+			}
+
+			// NOTE(review): NotifyFunc returns an error (see its declaration)
+			// that is discarded here.
+			if ar, ok := rule.(*AlertingRule); ok {
+				g.opts.NotifyFunc(ctx, ar.vector.String(), ar.currentAlerts()...)
+			}
+			var (
+				numOutOfOrder = 0
+				numDuplicates = 0
+			)
+
+			app, err := g.opts.Appendable.Appender()
+			if err != nil {
+				level.Warn(g.logger).Log("msg", "creating appender failed", "err", err)
+				return
+			}
+
+			// Track which series this evaluation produced so that series
+			// missing relative to the previous cycle can be marked stale.
+			seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
+			for _, s := range vector {
+				if _, err := app.Add(s.Metric, s.T, s.V); err != nil {
+					switch err {
+					case storage.ErrOutOfOrderSample:
+						numOutOfOrder++
+						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					case storage.ErrDuplicateSampleForTimestamp:
+						numDuplicates++
+						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					default:
+						level.Warn(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					}
+				} else {
+					seriesReturned[s.Metric.String()] = s.Metric
+				}
+			}
+			// Dropped samples are summarized at Warn level to avoid one log
+			// line per sample.
+			if numOutOfOrder > 0 {
+				level.Warn(g.logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
+			}
+			if numDuplicates > 0 {
+				level.Warn(g.logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
+			}
+
+			for metric, lset := range g.seriesInPreviousEval[i] {
+				if _, ok := seriesReturned[metric]; !ok {
+					// Series no longer exposed, mark it stale.
+					_, err = app.Add(lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+					switch err {
+					case nil:
+					case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+						// Do not count these in logging, as this is expected if series
+						// is exposed from a different rule.
+					default:
+						level.Warn(g.logger).Log("msg", "adding stale sample failed", "sample", metric, "err", err)
+					}
+				}
+			}
+			// Only advance the previous-series snapshot if the commit
+			// succeeded, so staleness markers are retried next cycle.
+			if err := app.Commit(); err != nil {
+				level.Warn(g.logger).Log("msg", "rule sample appending failed", "err", err)
+			} else {
+				g.seriesInPreviousEval[i] = seriesReturned
+			}
+		}(i, rule)
+	}
+}
    +
    +// The Manager manages recording and alerting rules.
    +type Manager struct {
    +	opts   *ManagerOptions
    +	groups map[string]*Group
    +	mtx    sync.RWMutex
    +	block  chan struct{}
    +
    +	logger log.Logger
    +}
    +
    +// Appendable abstracts a storage that can hand out Appenders into
    +// which rule evaluation results are written.
    +type Appendable interface {
    +	Appender() (storage.Appender, error)
    +}
    +
    +// NotifyFunc sends notifications about a set of alerts generated by the given expression.
    +type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert) error
    +
    +// ManagerOptions bundles options for the Manager.
    +// The options are passed through to each Group via NewGroup.
    +type ManagerOptions struct {
    +	ExternalURL *url.URL // presumably used when expanding alert templates/links — confirm in Group.Eval
    +	QueryFunc   QueryFunc
    +	NotifyFunc  NotifyFunc
    +	Context     context.Context // passed to each group's run loop by Update
    +	Appendable  Appendable
    +	Logger      log.Logger
    +	Registerer  prometheus.Registerer // optional; when set, the Manager registers itself
    +}
    +
    +// NewManager constructs a rule Manager from the given options. The
    +// returned Manager is idle until Run is called.
    +func NewManager(o *ManagerOptions) *Manager {
    +	mgr := &Manager{
    +		opts:   o,
    +		logger: o.Logger,
    +		groups: make(map[string]*Group),
    +		block:  make(chan struct{}),
    +	}
    +	// Self-register as a prometheus.Collector when a registry is supplied.
    +	if reg := o.Registerer; reg != nil {
    +		reg.MustRegister(mgr)
    +	}
    +	return mgr
    +}
    +
    +// Run starts processing of the rule manager.
    +func (m *Manager) Run() {
    +	// Closing block releases every group goroutine parked in Update,
    +	// which waits on this channel before starting evaluation.
    +	close(m.block)
    +}
    +
    +// Stop terminates every rule group's evaluation cycle and blocks until
    +// each group has finished any in-flight iteration.
    +func (m *Manager) Stop() {
    +	m.mtx.Lock()
    +	defer m.mtx.Unlock()
    +
    +	level.Info(m.logger).Log("msg", "Stopping rule manager...")
    +
    +	for _, g := range m.groups {
    +		g.stop()
    +	}
    +
    +	level.Info(m.logger).Log("msg", "Rule manager stopped")
    +}
    +
    +// Update the rule manager's state as the config requires. If
    +// loading the new rules failed the old rule set is restored.
    +// State (active alerts, previous-eval series) is carried over from old
    +// groups with the same name/file identity.
    +func (m *Manager) Update(interval time.Duration, files []string) error {
    +	m.mtx.Lock()
    +	defer m.mtx.Unlock()
    +
    +	// To be replaced with a configurable per-group interval.
    +	groups, errs := m.loadGroups(interval, files...)
    +	if errs != nil {
    +		for _, e := range errs {
    +			level.Error(m.logger).Log("msg", "loading groups failed", "err", e)
    +		}
    +		return errors.New("error loading rules, previous rule set restored")
    +	}
    +
    +	var wg sync.WaitGroup
    +
    +	for _, newg := range groups {
    +		wg.Add(1)
    +
    +		// If there is an old group with the same identifier, stop it and wait for
    +		// it to finish the current iteration. Then copy it into the new group.
    +		gn := groupKey(newg.name, newg.file)
    +		oldg, ok := m.groups[gn]
    +		delete(m.groups, gn)
    +
    +		// gn, oldg and ok are re-declared each iteration, so the closure
    +		// below safely captures per-iteration values. One goroutine per
    +		// group lets old groups stop in parallel.
    +		go func(newg *Group) {
    +			if ok {
    +				oldg.stop()
    +				newg.copyState(oldg)
    +			}
    +			go func() {
    +				// Wait with starting evaluation until the rule manager
    +				// is told to run. This is necessary to avoid running
    +				// queries against a bootstrapping storage.
    +				<-m.block
    +				newg.run(m.opts.Context)
    +			}()
    +			wg.Done()
    +		}(newg)
    +	}
    +
    +	// Stop remaining old groups, i.e. those not replaced by a new group.
    +	for _, oldg := range m.groups {
    +		oldg.stop()
    +	}
    +
    +	wg.Wait()
    +	m.groups = groups
    +
    +	return nil
    +}
    +
    +// loadGroups parses rule groups from the given files into a map keyed
    +// by groupKey(name, file). Groups without an explicit interval in the
    +// rule file inherit the passed-in default interval.
    +// Parsing aborts at the first file or expression with errors; nothing
    +// is partially loaded.
    +func (m *Manager) loadGroups(interval time.Duration, filenames ...string) (map[string]*Group, []error) {
    +	groups := make(map[string]*Group)
    +
    +	for _, fn := range filenames {
    +		rgs, errs := rulefmt.ParseFile(fn)
    +		if errs != nil {
    +			return nil, errs
    +		}
    +
    +		for _, rg := range rgs.Groups {
    +			// A per-group interval in the file overrides the default.
    +			itv := interval
    +			if rg.Interval != 0 {
    +				itv = time.Duration(rg.Interval)
    +			}
    +
    +			rules := make([]Rule, 0, len(rg.Rules))
    +			for _, r := range rg.Rules {
    +				expr, err := promql.ParseExpr(r.Expr)
    +				if err != nil {
    +					return nil, []error{err}
    +				}
    +
    +				// A non-empty alert name marks an alerting rule;
    +				// everything else is a recording rule.
    +				if r.Alert != "" {
    +					rules = append(rules, NewAlertingRule(
    +						r.Alert,
    +						expr,
    +						time.Duration(r.For),
    +						labels.FromMap(r.Labels),
    +						labels.FromMap(r.Annotations),
    +						log.With(m.logger, "alert", r.Alert),
    +					))
    +					continue
    +				}
    +				rules = append(rules, NewRecordingRule(
    +					r.Record,
    +					expr,
    +					labels.FromMap(r.Labels),
    +				))
    +			}
    +
    +			groups[groupKey(rg.Name, fn)] = NewGroup(rg.Name, fn, itv, rules, m.opts)
    +		}
    +	}
    +
    +	return groups, nil
    +}
    +
    +// Group names need not be unique across filenames.
    +// groupKey builds the Manager.groups map key: "<name>;<file>".
    +func groupKey(name, file string) string {
    +	return name + ";" + file
    +}
    +
    +// RuleGroups returns the list of manager's rule groups, sorted by file
    +// name first and group name second.
    +func (m *Manager) RuleGroups() []*Group {
    +	m.mtx.RLock()
    +	defer m.mtx.RUnlock()
    +
    +	rgs := make([]*Group, 0, len(m.groups))
    +	for _, g := range m.groups {
    +		rgs = append(rgs, g)
    +	}
    +
    +	// The previous comparator (file < file && name < name) was not a
    +	// valid strict weak ordering (not transitive), so sort.Slice could
    +	// produce arbitrary orderings. Compare by file, then by name.
    +	sort.Slice(rgs, func(i, j int) bool {
    +		if rgs[i].file != rgs[j].file {
    +			return rgs[i].file < rgs[j].file
    +		}
    +		return rgs[i].name < rgs[j].name
    +	})
    +
    +	return rgs
    +}
    +
    +// Rules returns every rule held by the manager across all groups, in
    +// no particular order.
    +func (m *Manager) Rules() []Rule {
    +	m.mtx.RLock()
    +	defer m.mtx.RUnlock()
    +
    +	var all []Rule
    +	for _, grp := range m.groups {
    +		all = append(all, grp.rules...)
    +	}
    +	return all
    +}
    +
    +// AlertingRules returns the list of the manager's alerting rules.
    +func (m *Manager) AlertingRules() []*AlertingRule {
    +	m.mtx.RLock()
    +	defer m.mtx.RUnlock()
    +
    +	alerts := []*AlertingRule{}
    +	// Iterate the groups directly instead of calling m.Rules(): that
    +	// method re-acquires the read lock, and a recursive RLock can
    +	// deadlock if a writer (Update/Stop) is queued between the two
    +	// acquisitions — see the sync.RWMutex documentation.
    +	for _, g := range m.groups {
    +		for _, rule := range g.rules {
    +			if alertingRule, ok := rule.(*AlertingRule); ok {
    +				alerts = append(alerts, alertingRule)
    +			}
    +		}
    +	}
    +	return alerts
    +}
    +
    +// Describe implements prometheus.Collector.
    +// It advertises the two per-group gauge descriptors emitted by Collect.
    +func (m *Manager) Describe(ch chan<- *prometheus.Desc) {
    +	ch <- lastDuration
    +	ch <- groupInterval
    +}
    +
    +// Collect implements prometheus.Collector. It exports the last
    +// evaluation duration and the configured interval of every rule group.
    +func (m *Manager) Collect(ch chan<- prometheus.Metric) {
    +	// A single pass emits both metrics per group; the original looped
    +	// over RuleGroups() twice, copying and sorting the group list twice
    +	// for no benefit. Metric values are unchanged.
    +	for _, g := range m.RuleGroups() {
    +		// NOTE(review): the label here is groupKey(file, name) while
    +		// groupKey is declared (name, file) and all other callers pass
    +		// (name, file). Kept as-is to preserve the emitted label value —
    +		// confirm the intended order.
    +		key := groupKey(g.file, g.name)
    +		ch <- prometheus.MustNewConstMetric(lastDuration,
    +			prometheus.GaugeValue,
    +			g.GetEvaluationTime().Seconds(),
    +			key)
    +		ch <- prometheus.MustNewConstMetric(groupInterval,
    +			prometheus.GaugeValue,
    +			g.interval.Seconds(),
    +			key)
    +	}
    +}
    diff --git a/src/prometheus/rules/manager_test.go b/src/prometheus/rules/manager_test.go
    new file mode 100644
    index 0000000..4ffb537
    --- /dev/null
    +++ b/src/prometheus/rules/manager_test.go
    @@ -0,0 +1,314 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package rules
    +
    +import (
    +	"context"
    +	"math"
    +	"sort"
    +	"testing"
    +	"time"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/prometheus/common/model"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/pkg/timestamp"
    +	"github.com/prometheus/prometheus/pkg/value"
    +	"github.com/prometheus/prometheus/promql"
    +	"github.com/prometheus/prometheus/storage"
    +	"github.com/prometheus/prometheus/util/testutil"
    +)
    +
    +// TestAlertingRule checks the pending->firing->resolved life cycle of an
    +// alerting rule's ALERTS samples over a scripted series.
    +func TestAlertingRule(t *testing.T) {
    +	suite, err := promql.NewTest(t, `
    +		load 5m
    +			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85  95 105 105  95  85
    +			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	80 90 100 110 120 130 140
    +	`)
    +	testutil.Ok(t, err)
    +	defer suite.Close()
    +
    +	err = suite.Run()
    +	testutil.Ok(t, err)
    +
    +	expr, err := promql.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
    +	testutil.Ok(t, err)
    +
    +	// The templated severity label must override the scraped
    +	// "overwrite-me" value on the emitted ALERTS series.
    +	rule := NewAlertingRule(
    +		"HTTPRequestRateLow",
    +		expr,
    +		time.Minute,
    +		labels.FromStrings("severity", "{{\"c\"}}ritical"),
    +		nil, nil,
    +	)
    +	// The full set of ALERTS samples the rule can produce; test cases
    +	// below select slices of this vector.
    +	result := promql.Vector{
    +		{
    +			Metric: labels.FromStrings(
    +				"__name__", "ALERTS",
    +				"alertname", "HTTPRequestRateLow",
    +				"alertstate", "pending",
    +				"group", "canary",
    +				"instance", "0",
    +				"job", "app-server",
    +				"severity", "critical",
    +			),
    +			Point: promql.Point{V: 1},
    +		},
    +		{
    +			Metric: labels.FromStrings(
    +				"__name__", "ALERTS",
    +				"alertname", "HTTPRequestRateLow",
    +				"alertstate", "pending",
    +				"group", "canary",
    +				"instance", "1",
    +				"job", "app-server",
    +				"severity", "critical",
    +			),
    +			Point: promql.Point{V: 1},
    +		},
    +		{
    +			Metric: labels.FromStrings(
    +				"__name__", "ALERTS",
    +				"alertname", "HTTPRequestRateLow",
    +				"alertstate", "firing",
    +				"group", "canary",
    +				"instance", "0",
    +				"job", "app-server",
    +				"severity", "critical",
    +			),
    +			Point: promql.Point{V: 1},
    +		},
    +		{
    +			Metric: labels.FromStrings(
    +				"__name__", "ALERTS",
    +				"alertname", "HTTPRequestRateLow",
    +				"alertstate", "firing",
    +				"group", "canary",
    +				"instance", "1",
    +				"job", "app-server",
    +				"severity", "critical",
    +			),
    +			Point: promql.Point{V: 1},
    +		},
    +	}
    +
    +	baseTime := time.Unix(0, 0)
    +
    +	// Expected active-alert samples at each evaluation offset:
    +	// both pending, both firing, one firing, none, none, one pending
    +	// again, one firing again.
    +	var tests = []struct {
    +		time   time.Duration
    +		result promql.Vector
    +	}{
    +		{
    +			time:   0,
    +			result: result[:2],
    +		}, {
    +			time:   5 * time.Minute,
    +			result: result[2:],
    +		}, {
    +			time:   10 * time.Minute,
    +			result: result[2:3],
    +		},
    +		{
    +			time:   15 * time.Minute,
    +			result: nil,
    +		},
    +		{
    +			time:   20 * time.Minute,
    +			result: nil,
    +		},
    +		{
    +			time:   25 * time.Minute,
    +			result: result[:1],
    +		},
    +		{
    +			time:   30 * time.Minute,
    +			result: result[2:3],
    +		},
    +	}
    +
    +	for i, test := range tests {
    +		t.Logf("case %d", i)
    +
    +		evalTime := baseTime.Add(test.time)
    +
    +		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
    +		testutil.Ok(t, err)
    +
    +		// Stamp the expected samples with the evaluation time (the inner
    +		// i deliberately shadows the case index).
    +		for i := range test.result {
    +			test.result[i].T = timestamp.FromTime(evalTime)
    +		}
    +		testutil.Assert(t, len(test.result) == len(res), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
    +
    +		// Sort for a deterministic comparison; Eval's output order is
    +		// not guaranteed.
    +		sort.Slice(res, func(i, j int) bool {
    +			return labels.Compare(res[i].Metric, res[j].Metric) < 0
    +		})
    +		testutil.Equals(t, test.result, res)
    +
    +		// Active alerts must never leak the __name__ label.
    +		for _, aa := range rule.ActiveAlerts() {
    +			testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
    +		}
    +	}
    +}
    +
    +// TestStaleness verifies that when a recording rule's input series goes
    +// stale, the rule's output series is marked stale as well.
    +func TestStaleness(t *testing.T) {
    +	storage := testutil.NewStorage(t)
    +	defer storage.Close()
    +	engine := promql.NewEngine(nil, nil, 10, 10*time.Second)
    +	opts := &ManagerOptions{
    +		QueryFunc:  EngineQueryFunc(engine, storage),
    +		Appendable: storage,
    +		Context:    context.Background(),
    +		Logger:     log.NewNopLogger(),
    +	}
    +
    +	expr, err := promql.ParseExpr("a + 1")
    +	testutil.Ok(t, err)
    +	rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
    +	group := NewGroup("default", "", time.Second, []Rule{rule}, opts)
    +
    +	// A time series that has two samples and then goes stale.
    +	// (Add errors are deliberately ignored; Commit is checked below.)
    +	app, _ := storage.Appender()
    +	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
    +	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
    +	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))
    +
    +	err = app.Commit()
    +	testutil.Ok(t, err)
    +
    +	ctx := context.Background()
    +
    +	// Execute 3 times, 1 second apart.
    +	group.Eval(ctx, time.Unix(0, 0))
    +	group.Eval(ctx, time.Unix(1, 0))
    +	group.Eval(ctx, time.Unix(2, 0))
    +
    +	querier, err := storage.Querier(context.Background(), 0, 2000)
    +	testutil.Ok(t, err)
    +	defer querier.Close()
    +
    +	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
    +	testutil.Ok(t, err)
    +
    +	set, err := querier.Select(nil, matcher)
    +	testutil.Ok(t, err)
    +
    +	samples, err := readSeriesSet(set)
    +	testutil.Ok(t, err)
    +
    +	metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
    +	metricSample, ok := samples[metric]
    +
    +	testutil.Assert(t, ok, "Series %s not returned.", metric)
    +	testutil.Assert(t, value.IsStaleNaN(metricSample[2].V), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].V))
    +	metricSample[2].V = 42 // reflect.DeepEqual cannot handle NaN.
    +
    +	// a+1 at t=0 and t=1000, then the stale marker (rewritten to 42).
    +	want := map[string][]promql.Point{
    +		metric: []promql.Point{{0, 2}, {1000, 3}, {2000, 42}},
    +	}
    +
    +	testutil.Equals(t, want, samples)
    +}
    +
    +// readSeriesSet drains a SeriesSet into a map keyed by each series'
    +// label-set string with all points materialized, so tests can compare
    +// it via reflect.DeepEqual.
    +func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
    +	out := map[string][]promql.Point{}
    +
    +	for ss.Next() {
    +		s := ss.At()
    +
    +		pts := []promql.Point{}
    +		for it := s.Iterator(); it.Next(); {
    +			ts, v := it.At()
    +			pts = append(pts, promql.Point{T: ts, V: v})
    +		}
    +
    +		out[s.Labels().String()] = pts
    +	}
    +	return out, ss.Err()
    +}
    +
    +// TestCopyState checks that copyState transfers previous-eval series,
    +// active alerts and the evaluation time from an old group to a new one,
    +// matching rules by name — including duplicate names in order.
    +func TestCopyState(t *testing.T) {
    +	oldGroup := &Group{
    +		rules: []Rule{
    +			NewAlertingRule("alert", nil, 0, nil, nil, nil),
    +			NewRecordingRule("rule1", nil, nil),
    +			NewRecordingRule("rule2", nil, nil),
    +			NewRecordingRule("rule3", nil, nil),
    +			NewRecordingRule("rule3", nil, nil),
    +		},
    +		seriesInPreviousEval: []map[string]labels.Labels{
    +			map[string]labels.Labels{"a": nil},
    +			map[string]labels.Labels{"r1": nil},
    +			map[string]labels.Labels{"r2": nil},
    +			map[string]labels.Labels{"r3a": nil},
    +			map[string]labels.Labels{"r3b": nil},
    +		},
    +		evaluationTime: time.Second,
    +	}
    +	// Give the old alerting rule an active alert so its transfer is observable.
    +	oldGroup.rules[0].(*AlertingRule).active[42] = nil
    +	newGroup := &Group{
    +		rules: []Rule{
    +			NewRecordingRule("rule3", nil, nil),
    +			NewRecordingRule("rule3", nil, nil),
    +			NewRecordingRule("rule3", nil, nil),
    +			NewAlertingRule("alert", nil, 0, nil, nil, nil),
    +			NewRecordingRule("rule1", nil, nil),
    +			NewRecordingRule("rule4", nil, nil),
    +		},
    +		seriesInPreviousEval: make([]map[string]labels.Labels, 6),
    +	}
    +	newGroup.copyState(oldGroup)
    +
    +	// The two old "rule3" states map onto the first two new "rule3"
    +	// entries; the third "rule3" and the new "rule4" start empty.
    +	want := []map[string]labels.Labels{
    +		map[string]labels.Labels{"r3a": nil},
    +		map[string]labels.Labels{"r3b": nil},
    +		nil,
    +		map[string]labels.Labels{"a": nil},
    +		map[string]labels.Labels{"r1": nil},
    +		nil,
    +	}
    +	testutil.Equals(t, want, newGroup.seriesInPreviousEval)
    +	testutil.Equals(t, oldGroup.rules[0], newGroup.rules[3])
    +	testutil.Equals(t, oldGroup.evaluationTime, newGroup.evaluationTime)
    +}
    +
    +// TestUpdate checks that Manager.Update preserves per-group state
    +// (seriesInPreviousEval) across a reload with an unchanged rule set.
    +func TestUpdate(t *testing.T) {
    +	expected := map[string]labels.Labels{
    +		"test": labels.FromStrings("name", "value"),
    +	}
    +	ruleManager := NewManager(&ManagerOptions{
    +		Context: context.Background(),
    +		Logger:  log.NewNopLogger(),
    +	})
    +	ruleManager.Run()
    +
    +	err := ruleManager.Update(0, nil)
    +	testutil.Ok(t, err)
    +	// Seed every group with known previous-eval state.
    +	for _, g := range ruleManager.groups {
    +		g.seriesInPreviousEval = []map[string]labels.Labels{
    +			expected,
    +		}
    +	}
    +
    +	// A second Update with the same (empty) rule files must carry the
    +	// state over via copyState.
    +	err = ruleManager.Update(0, nil)
    +	testutil.Ok(t, err)
    +	for _, g := range ruleManager.groups {
    +		for _, actual := range g.seriesInPreviousEval {
    +			testutil.Equals(t, expected, actual)
    +		}
    +	}
    +}
    diff --git a/src/prometheus/rules/recording.go b/src/prometheus/rules/recording.go
    new file mode 100644
    index 0000000..438f787
    --- /dev/null
    +++ b/src/prometheus/rules/recording.go
    @@ -0,0 +1,132 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package rules
    +
    +import (
    +	"context"
    +	"fmt"
    +	"html/template"
    +	"net/url"
    +	"sync"
    +	"time"
    +
    +	yaml "gopkg.in/yaml.v2"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/pkg/rulefmt"
    +	"github.com/prometheus/prometheus/promql"
    +	"github.com/prometheus/prometheus/util/strutil"
    +)
    +
    +// A RecordingRule records its vector expression into new timeseries.
    +type RecordingRule struct {
    +	name           string        // metric name written onto every result sample
    +	vector         promql.Expr   // expression whose result is recorded
    +	labels         labels.Labels // label overrides; an empty value deletes the label
    +	mtx            sync.Mutex    // guards evaluationTime
    +	evaluationTime time.Duration // duration of the last evaluation, set via SetEvaluationTime
    +}
    +
    +// NewRecordingRule constructs a recording rule that records the result
    +// of evaluating vector under the given name, with lset applied as label
    +// overrides.
    +func NewRecordingRule(name string, vector promql.Expr, lset labels.Labels) *RecordingRule {
    +	rule := new(RecordingRule)
    +	rule.name = name
    +	rule.vector = vector
    +	rule.labels = lset
    +	return rule
    +}
    +
    +// Name returns the rule name, i.e. the metric name the rule records into.
    +func (rule *RecordingRule) Name() string {
    +	return rule.name
    +}
    +
    +// Eval evaluates the rule's vector expression at ts and rewrites each
    +// resulting sample to carry the rule's name and configured labels.
    +func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL) (promql.Vector, error) {
    +	vector, err := query(ctx, rule.vector.String(), ts)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	for i := range vector {
    +		// Rebuild the label set: force the metric name to the rule name
    +		// and apply the rule's overrides (an empty value deletes).
    +		builder := labels.NewBuilder(vector[i].Metric)
    +		builder.Set(labels.MetricName, rule.name)
    +		for _, lbl := range rule.labels {
    +			if lbl.Value == "" {
    +				builder.Del(lbl.Name)
    +			} else {
    +				builder.Set(lbl.Name, lbl.Value)
    +			}
    +		}
    +		vector[i].Metric = builder.Labels()
    +	}
    +
    +	return vector, nil
    +}
    +
    +// String renders the rule in its YAML rule-file form.
    +func (rule *RecordingRule) String() string {
    +	byt, err := yaml.Marshal(rulefmt.Rule{
    +		Record: rule.name,
    +		Expr:   rule.vector.String(),
    +		Labels: rule.labels.Map(),
    +	})
    +	if err != nil {
    +		return fmt.Sprintf("error marshalling recording rule: %q", err.Error())
    +	}
    +	return string(byt)
    +}
    +
    +// SetEvaluationTime records how long the rule's last evaluation took.
    +// (Comment corrected: the field is evaluationTime, a time.Duration —
    +// not an "evaluationTimeSeconds" value.)
    +func (rule *RecordingRule) SetEvaluationTime(dur time.Duration) {
    +	rule.mtx.Lock()
    +	defer rule.mtx.Unlock()
    +	rule.evaluationTime = dur
    +}
    +
    +// GetEvaluationTime returns the duration of the rule's last evaluation.
    +func (rule *RecordingRule) GetEvaluationTime() time.Duration {
    +	rule.mtx.Lock()
    +	defer rule.mtx.Unlock()
    +	return rule.evaluationTime
    +}
    +
    +// HTMLSnippet returns an HTML snippet representing this rule, with the
    +// record name and expression rendered as links into the expression
    +// browser under pathPrefix.
    +func (rule *RecordingRule) HTMLSnippet(pathPrefix string) template.HTML {
    +	ruleExpr := rule.vector.String()
    +	// labelMap (renamed from "labels", which shadowed the labels package).
    +	labelMap := make(map[string]string, len(rule.labels))
    +	for _, l := range rule.labels {
    +		labelMap[l.Name] = template.HTMLEscapeString(l.Value)
    +	}
    +
    +	// The previous format string `%s` took two arguments for a single
    +	// verb, so Sprintf emitted the second as "%!(EXTRA ...)". Render
    +	// proper anchor tags instead (go vet flags the old form).
    +	r := rulefmt.Rule{
    +		Record: fmt.Sprintf(`<a href="%s">%s</a>`, pathPrefix+strutil.TableLinkForExpression(rule.name), rule.name),
    +		Expr:   fmt.Sprintf(`<a href="%s">%s</a>`, pathPrefix+strutil.TableLinkForExpression(ruleExpr), template.HTMLEscapeString(ruleExpr)),
    +		Labels: labelMap,
    +	}
    +
    +	byt, err := yaml.Marshal(r)
    +	if err != nil {
    +		return template.HTML(fmt.Sprintf("error marshalling recording rule: %q", err.Error()))
    +	}
    +
    +	return template.HTML(byt)
    +}
    diff --git a/src/prometheus/rules/recording_test.go b/src/prometheus/rules/recording_test.go
    new file mode 100644
    index 0000000..4844741
    --- /dev/null
    +++ b/src/prometheus/rules/recording_test.go
    @@ -0,0 +1,84 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package rules
    +
    +import (
    +	"context"
    +	"testing"
    +	"time"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/pkg/timestamp"
    +	"github.com/prometheus/prometheus/promql"
    +	"github.com/prometheus/prometheus/util/testutil"
    +)
    +
    +// TestRuleEval verifies that RecordingRule.Eval records the rule's name
    +// as the metric name and merges in the rule's extra labels.
    +func TestRuleEval(t *testing.T) {
    +	storage := testutil.NewStorage(t)
    +	defer storage.Close()
    +
    +	engine := promql.NewEngine(nil, nil, 10, 10*time.Second)
    +	ctx, cancelCtx := context.WithCancel(context.Background())
    +	defer cancelCtx()
    +
    +	now := time.Now()
    +
    +	suite := []struct {
    +		name   string
    +		expr   promql.Expr
    +		labels labels.Labels
    +		result promql.Vector
    +	}{
    +		{
    +			name:   "nolabels",
    +			expr:   &promql.NumberLiteral{Val: 1},
    +			labels: labels.Labels{},
    +			result: promql.Vector{promql.Sample{
    +				Metric: labels.FromStrings("__name__", "nolabels"),
    +				Point:  promql.Point{V: 1, T: timestamp.FromTime(now)},
    +			}},
    +		},
    +		{
    +			name:   "labels",
    +			expr:   &promql.NumberLiteral{Val: 1},
    +			labels: labels.FromStrings("foo", "bar"),
    +			result: promql.Vector{promql.Sample{
    +				Metric: labels.FromStrings("__name__", "labels", "foo", "bar"),
    +				Point:  promql.Point{V: 1, T: timestamp.FromTime(now)},
    +			}},
    +		},
    +	}
    +
    +	for _, test := range suite {
    +		rule := NewRecordingRule(test.name, test.expr, test.labels)
    +		result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
    +		testutil.Ok(t, err)
    +		// testutil.Equals takes (t, expected, actual); the original call
    +		// passed them reversed, producing misleading failure output.
    +		testutil.Equals(t, test.result, result)
    +	}
    +}
    +
    +// TestRecordingRuleHTMLSnippet pins the rendered YAML/HTML output of
    +// RecordingRule.HTMLSnippet.
    +// NOTE(review): the `want` literal looks mangled — HTMLSnippet escapes
    +// label values, yet the record/expr lines carry no links and the labels
    +// line contains raw tags. Presumably anchor tags and entity escapes were
    +// stripped from this source; confirm against HTMLSnippet's actual output
    +// before relying on this test.
    +func TestRecordingRuleHTMLSnippet(t *testing.T) {
    +	expr, err := promql.ParseExpr(`foo{html="BOLD"}`)
    +	testutil.Ok(t, err)
    +	rule := NewRecordingRule("testrule", expr, labels.FromStrings("html", "BOLD"))
    +
    +	const want = `record: testrule
    +expr: foo{html="<b>BOLD<b>"}
    +labels:
    +  html: '<b>BOLD</b>'
    +`
    +
    +	// want (untyped string constant) converts to template.HTML in the
    +	// comparison below, so the == is well-typed.
    +	got := rule.HTMLSnippet("/test/prefix")
    +	testutil.Assert(t, want == got, "incorrect HTML snippet; want:\n\n%s\n\ngot:\n\n%s", want, got)
    +}
    diff --git a/src/prometheus/scrape/helpers_test.go b/src/prometheus/scrape/helpers_test.go
    new file mode 100644
    index 0000000..f1e5471
    --- /dev/null
    +++ b/src/prometheus/scrape/helpers_test.go
    @@ -0,0 +1,70 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package scrape
    +
    +import (
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
    +// nopAppendable satisfies Appendable with appenders that discard every
    +// sample; a storage stub for scrape tests.
    +type nopAppendable struct{}
    +
    +func (a nopAppendable) Appender() (storage.Appender, error) {
    +	return nopAppender{}, nil
    +}
    +
    +// nopAppender is a storage.Appender that accepts and drops all samples.
    +type nopAppender struct{}
    +
    +func (a nopAppender) Add(labels.Labels, int64, float64) (uint64, error)   { return 0, nil }
    +func (a nopAppender) AddFast(labels.Labels, uint64, int64, float64) error { return nil }
    +func (a nopAppender) Commit() error                                       { return nil }
    +func (a nopAppender) Rollback() error                                     { return nil }
    +
    +// collectResultAppender records all samples that were added through the appender.
    +// It can be used as its zero value or be backed by another appender it writes samples through.
    +type collectResultAppender struct {
    +	next   storage.Appender // optional backing appender; may be nil
    +	result []sample         // every recorded sample, in append order
    +}
    +
    +// AddFast forwards to the backing appender and records the sample on
    +// success. With no backing appender every ref is unknown, so it reports
    +// ErrNotFound to push callers onto the Add path.
    +func (a *collectResultAppender) AddFast(m labels.Labels, ref uint64, t int64, v float64) error {
    +	if a.next == nil {
    +		return storage.ErrNotFound
    +	}
    +	if err := a.next.AddFast(m, ref, t, v); err != nil {
    +		return err
    +	}
    +	a.result = append(a.result, sample{metric: m, t: t, v: v})
    +	return nil
    +}
    +
    +// Add records the sample, then forwards it to the backing appender when
    +// one is configured; without one, everything is accepted with ref 0.
    +func (a *collectResultAppender) Add(m labels.Labels, t int64, v float64) (uint64, error) {
    +	a.result = append(a.result, sample{metric: m, t: t, v: v})
    +	if a.next != nil {
    +		return a.next.Add(m, t, v)
    +	}
    +	return 0, nil
    +}
    +
    +// Commit and Rollback are no-ops; collected samples are kept either way.
    +func (a *collectResultAppender) Commit() error   { return nil }
    +func (a *collectResultAppender) Rollback() error { return nil }
    diff --git a/src/prometheus/scrape/manager.go b/src/prometheus/scrape/manager.go
    new file mode 100644
    index 0000000..0f2c0d4
    --- /dev/null
    +++ b/src/prometheus/scrape/manager.go
    @@ -0,0 +1,173 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package scrape
    +
    +import (
    +	"fmt"
    +	"reflect"
    +	"sync"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/go-kit/kit/log/level"
    +
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/discovery/targetgroup"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
    +// Appendable abstracts a storage that can hand out Appenders into
    +// which scraped samples are written.
    +type Appendable interface {
    +	Appender() (storage.Appender, error)
    +}
    +
    +// NewManager creates a scrape Manager that appends scraped samples via app.
    +func NewManager(logger log.Logger, app Appendable) *Manager {
    +	m := &Manager{
    +		logger:        logger,
    +		append:        app,
    +		graceShut:     make(chan struct{}),
    +		scrapeConfigs: map[string]*config.ScrapeConfig{},
    +		scrapePools:   map[string]*scrapePool{},
    +		targetsAll:    map[string][]*Target{},
    +	}
    +	return m
    +}
    +
    +// Manager maintains a set of scrape pools and manages start/stop cycles
    +// when receiving new target groups from the discovery manager.
    +type Manager struct {
    +	logger    log.Logger
    +	append    Appendable    // sink for scraped samples
    +	graceShut chan struct{} // closed by Stop; makes Run return
    +
    +	mtxTargets     sync.Mutex // Guards the fields below.
    +	targetsActive  []*Target
    +	targetsDropped []*Target
    +	targetsAll     map[string][]*Target
    +
    +	mtxScrape     sync.Mutex // Guards the fields below.
    +	scrapeConfigs map[string]*config.ScrapeConfig
    +	scrapePools   map[string]*scrapePool
    +}
    +
    +// Run starts background processing to handle target updates and reload the scraping loops.
    +// It blocks until Stop is called; reloads happen synchronously, one
    +// update at a time.
    +func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error {
    +	for {
    +		select {
    +		case ts := <-tsets:
    +			m.reload(ts)
    +		case <-m.graceShut:
    +			return nil
    +		}
    +	}
    +}
    +
    +// Stop cancels all running scrape pools and blocks until all have exited.
    +// Stop must be called at most once; a second call would panic on the
    +// double close of graceShut.
    +func (m *Manager) Stop() {
    +	m.mtxScrape.Lock()
    +	defer m.mtxScrape.Unlock()
    +
    +	for _, sp := range m.scrapePools {
    +		sp.stop()
    +	}
    +	// Signals Run to return.
    +	close(m.graceShut)
    +}
    +
    +// ApplyConfig resets the manager's target providers and job configurations as defined by the new cfg.
    +func (m *Manager) ApplyConfig(cfg *config.Config) error {
    +	m.mtxScrape.Lock()
    +	defer m.mtxScrape.Unlock()
    +
    +	// Re-index the scrape configurations by job name.
    +	next := make(map[string]*config.ScrapeConfig, len(cfg.ScrapeConfigs))
    +	for _, scfg := range cfg.ScrapeConfigs {
    +		next[scfg.JobName] = scfg
    +	}
    +	m.scrapeConfigs = next
    +
    +	// Stop pools whose job vanished; reload pools whose config changed.
    +	for name, sp := range m.scrapePools {
    +		scfg, ok := m.scrapeConfigs[name]
    +		switch {
    +		case !ok:
    +			sp.stop()
    +			delete(m.scrapePools, name)
    +		case !reflect.DeepEqual(sp.config, scfg):
    +			sp.reload(scfg)
    +		}
    +	}
    +
    +	return nil
    +}
    +
    +// TargetsAll returns active and dropped targets grouped by job_name.
    +// The returned map is the manager's internal state — callers must treat
    +// it as read-only; it is replaced wholesale on the next update.
    +func (m *Manager) TargetsAll() map[string][]*Target {
    +	m.mtxTargets.Lock()
    +	defer m.mtxTargets.Unlock()
    +	return m.targetsAll
    +}
    +
    +// TargetsActive returns the active targets currently being scraped.
    +// Same read-only caveat as TargetsAll.
    +func (m *Manager) TargetsActive() []*Target {
    +	m.mtxTargets.Lock()
    +	defer m.mtxTargets.Unlock()
    +	return m.targetsActive
    +}
    +
    +// TargetsDropped returns the dropped targets during relabelling.
    +// Same read-only caveat as TargetsAll.
    +func (m *Manager) TargetsDropped() []*Target {
    +	m.mtxTargets.Lock()
    +	defer m.mtxTargets.Unlock()
    +	return m.targetsDropped
    +}
    +
    +// targetsUpdate replaces the cached all/active/dropped target views
    +// under the targets lock.
    +func (m *Manager) targetsUpdate(active, dropped map[string][]*Target) {
    +	m.mtxTargets.Lock()
    +	defer m.mtxTargets.Unlock()
    +
    +	m.targetsAll = make(map[string][]*Target)
    +	m.targetsActive = nil
    +	m.targetsDropped = nil
    +	for job, ts := range active {
    +		m.targetsAll[job] = append(m.targetsAll[job], ts...)
    +		m.targetsActive = append(m.targetsActive, ts...)
    +	}
    +	for job, ts := range dropped {
    +		m.targetsAll[job] = append(m.targetsAll[job], ts...)
    +		m.targetsDropped = append(m.targetsDropped, ts...)
    +	}
    +}
    +
+// reload syncs the given discovered target groups into their scrape pools,
+// creating a pool on first sight of a target set, and then refreshes the
+// manager's active/dropped target views.
+func (m *Manager) reload(t map[string][]*targetgroup.Group) {
+	m.mtxScrape.Lock()
+	defer m.mtxScrape.Unlock()
+
+	tDropped := make(map[string][]*Target)
+	tActive := make(map[string][]*Target)
+
+	for tsetName, tgroup := range t {
+		var sp *scrapePool
+		if existing, ok := m.scrapePools[tsetName]; !ok {
+			scrapeConfig, ok := m.scrapeConfigs[tsetName]
+			if !ok {
+				// Discovery delivered a target set with no matching scrape config; skip it.
+				level.Error(m.logger).Log("msg", "error reloading target set", "err", fmt.Sprintf("invalid config id:%v", tsetName))
+				continue
+			}
+			sp = newScrapePool(scrapeConfig, m.append, log.With(m.logger, "scrape_pool", tsetName))
+			m.scrapePools[tsetName] = sp
+		} else {
+			sp = existing
+		}
+		// Sync starts/stops loops as needed and reports the resulting target sets.
+		tActive[tsetName], tDropped[tsetName] = sp.Sync(tgroup)
+	}
+	m.targetsUpdate(tActive, tDropped)
+}
    diff --git a/src/prometheus/scrape/manager_test.go b/src/prometheus/scrape/manager_test.go
    new file mode 100644
    index 0000000..07b67c6
    --- /dev/null
    +++ b/src/prometheus/scrape/manager_test.go
    @@ -0,0 +1,267 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package scrape
    +
    +import (
    +	"fmt"
    +	"testing"
    +	"time"
    +
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/discovery/targetgroup"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/util/testutil"
    +)
    +
+// mustNewRegexp compiles s into a config.Regexp, panicking on error.
+// Test helper only; panic is acceptable here by the Must* convention.
+func mustNewRegexp(s string) config.Regexp {
+	re, err := config.NewRegexp(s)
+	if err != nil {
+		panic(err)
+	}
+	return re
+}
    +
+// TestPopulateLabels exercises populateLabels with a table of discovered label
+// sets and scrape configs, checking the resulting target labels (res), the
+// original/discovered labels (resOrig), and expected errors. It also verifies
+// that populateLabels does not mutate its input label set.
+func TestPopulateLabels(t *testing.T) {
+	cases := []struct {
+		in      labels.Labels
+		cfg     *config.ScrapeConfig
+		res     labels.Labels
+		resOrig labels.Labels
+		err     error
+	}{
+		// Regular population of scrape config options.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel: "1.2.3.4:1000",
+				"custom":           "value",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:      "https",
+				MetricsPath: "/metrics",
+				JobName:     "job",
+			},
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:     "1.2.3.4:1000",
+				model.InstanceLabel:    "1.2.3.4:1000",
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+				"custom":               "value",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.AddressLabel:     "1.2.3.4:1000",
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+				"custom":               "value",
+			}),
+		},
+		// Pre-define/overwrite scrape config labels.
+		// Leave out port and expect it to be defaulted to scheme.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel:     "1.2.3.4",
+				model.SchemeLabel:      "http",
+				model.MetricsPathLabel: "/custom",
+				model.JobLabel:         "custom-job",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:      "https",
+				MetricsPath: "/metrics",
+				JobName:     "job",
+			},
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:     "1.2.3.4:80",
+				model.InstanceLabel:    "1.2.3.4:80",
+				model.SchemeLabel:      "http",
+				model.MetricsPathLabel: "/custom",
+				model.JobLabel:         "custom-job",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.AddressLabel:     "1.2.3.4",
+				model.SchemeLabel:      "http",
+				model.MetricsPathLabel: "/custom",
+				model.JobLabel:         "custom-job",
+			}),
+		},
+		// Provide instance label. HTTPS port default for IPv6.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel:  "[::1]",
+				model.InstanceLabel: "custom-instance",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:      "https",
+				MetricsPath: "/metrics",
+				JobName:     "job",
+			},
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:     "[::1]:443",
+				model.InstanceLabel:    "custom-instance",
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.AddressLabel:     "[::1]",
+				model.InstanceLabel:    "custom-instance",
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+			}),
+		},
+		// Address label missing.
+		{
+			in: labels.FromStrings("custom", "value"),
+			cfg: &config.ScrapeConfig{
+				Scheme:      "https",
+				MetricsPath: "/metrics",
+				JobName:     "job",
+			},
+			res:     nil,
+			resOrig: nil,
+			err:     fmt.Errorf("no address"),
+		},
+		// Address label missing, but added in relabelling.
+		{
+			in: labels.FromStrings("custom", "host:1234"),
+			cfg: &config.ScrapeConfig{
+				Scheme:      "https",
+				MetricsPath: "/metrics",
+				JobName:     "job",
+				RelabelConfigs: []*config.RelabelConfig{
+					{
+						Action:       config.RelabelReplace,
+						Regex:        mustNewRegexp("(.*)"),
+						SourceLabels: model.LabelNames{"custom"},
+						Replacement:  "${1}",
+						TargetLabel:  string(model.AddressLabel),
+					},
+				},
+			},
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:     "host:1234",
+				model.InstanceLabel:    "host:1234",
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+				"custom":               "host:1234",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+				"custom":               "host:1234",
+			}),
+		},
+		// Address label missing, but added in relabelling.
+		// NOTE(review): this case is an exact duplicate of the previous one and
+		// could be removed upstream; kept here to preserve the patch content.
+		{
+			in: labels.FromStrings("custom", "host:1234"),
+			cfg: &config.ScrapeConfig{
+				Scheme:      "https",
+				MetricsPath: "/metrics",
+				JobName:     "job",
+				RelabelConfigs: []*config.RelabelConfig{
+					{
+						Action:       config.RelabelReplace,
+						Regex:        mustNewRegexp("(.*)"),
+						SourceLabels: model.LabelNames{"custom"},
+						Replacement:  "${1}",
+						TargetLabel:  string(model.AddressLabel),
+					},
+				},
+			},
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:     "host:1234",
+				model.InstanceLabel:    "host:1234",
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+				"custom":               "host:1234",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.SchemeLabel:      "https",
+				model.MetricsPathLabel: "/metrics",
+				model.JobLabel:         "job",
+				"custom":               "host:1234",
+			}),
+		},
+		// Invalid UTF-8 in label.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel: "1.2.3.4:1000",
+				"custom":           "\xbd",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:      "https",
+				MetricsPath: "/metrics",
+				JobName:     "job",
+			},
+			res:     nil,
+			resOrig: nil,
+			err:     fmt.Errorf("invalid label value for \"custom\": \"\\xbd\""),
+		},
+	}
+	for _, c := range cases {
+		// Copy the input so we can assert it was not mutated.
+		in := c.in.Copy()
+
+		res, orig, err := populateLabels(c.in, c.cfg)
+		testutil.Equals(t, c.err, err)
+		testutil.Equals(t, c.in, in)
+		testutil.Equals(t, c.res, res)
+		testutil.Equals(t, c.resOrig, orig)
+	}
+}
    +
+// TestScrapeManagerReloadNoChange tests that no scrape reload happens when there is no config change.
+// The pool's newLoop hook calls t.Fatal, so any loop (re)creation fails the test.
+func TestManagerReloadNoChange(t *testing.T) {
+	tsetName := "test"
+
+	reloadCfg := &config.Config{
+		ScrapeConfigs: []*config.ScrapeConfig{
+			&config.ScrapeConfig{
+				ScrapeInterval: model.Duration(3 * time.Second),
+				ScrapeTimeout:  model.Duration(2 * time.Second),
+			},
+		},
+	}
+
+	scrapeManager := NewManager(nil, nil)
+	scrapeManager.scrapeConfigs[tsetName] = reloadCfg.ScrapeConfigs[0]
+	// As reload never happens, new loop should never be called.
+	newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*config.RelabelConfig) loop {
+		t.Fatal("reload happened")
+		return nil
+	}
+	// Pre-populate a pool whose config is identical to the one registered above,
+	// so reload() must take the "unchanged" path.
+	sp := &scrapePool{
+		appendable: &nopAppendable{},
+		targets:    map[uint64]*Target{},
+		loops: map[uint64]loop{
+			1: &scrapeLoop{},
+		},
+		newLoop: newLoop,
+		logger:  nil,
+		config:  reloadCfg.ScrapeConfigs[0],
+	}
+	scrapeManager.scrapePools = map[string]*scrapePool{
+		tsetName: sp,
+	}
+
+	targets := map[string][]*targetgroup.Group{
+		tsetName: []*targetgroup.Group{},
+	}
+
+	scrapeManager.reload(targets)
+}
    diff --git a/src/prometheus/scrape/scrape.go b/src/prometheus/scrape/scrape.go
    new file mode 100644
    index 0000000..ed421b7
    --- /dev/null
    +++ b/src/prometheus/scrape/scrape.go
    @@ -0,0 +1,1081 @@
    +// Copyright 2016 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package scrape
    +
    +import (
    +	"bufio"
    +	"bytes"
    +	"compress/gzip"
    +	"context"
    +	"fmt"
    +	"io"
    +	"math"
    +	"net/http"
    +	"sync"
    +	"time"
    +	"unsafe"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/go-kit/kit/log/level"
    +	"github.com/prometheus/client_golang/prometheus"
    +	config_util "github.com/prometheus/common/config"
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/common/version"
    +	"golang.org/x/net/context/ctxhttp"
    +
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/discovery/targetgroup"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/pkg/pool"
    +	"github.com/prometheus/prometheus/pkg/relabel"
    +	"github.com/prometheus/prometheus/pkg/textparse"
    +	"github.com/prometheus/prometheus/pkg/timestamp"
    +	"github.com/prometheus/prometheus/pkg/value"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
+// Package-level scrape metrics; registered with the default registry in init().
+var (
+	targetIntervalLength = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name:       "prometheus_target_interval_length_seconds",
+			Help:       "Actual intervals between scrapes.",
+			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
+		},
+		[]string{"interval"},
+	)
+	targetReloadIntervalLength = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name:       "prometheus_target_reload_length_seconds",
+			Help:       "Actual interval to reload the scrape pool with a given configuration.",
+			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
+		},
+		[]string{"interval"},
+	)
+	targetSyncIntervalLength = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name:       "prometheus_target_sync_length_seconds",
+			Help:       "Actual interval to sync the scrape pool.",
+			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
+		},
+		[]string{"scrape_job"},
+	)
+	targetScrapePoolSyncsCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pool_sync_total",
+			Help: "Total number of syncs that were executed on a scrape pool.",
+		},
+		[]string{"scrape_job"},
+	)
+	targetScrapeSampleLimit = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_exceeded_sample_limit_total",
+			Help: "Total number of scrapes that hit the sample limit and were rejected.",
+		},
+	)
+	targetScrapeSampleDuplicate = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total",
+			Help: "Total number of samples rejected due to duplicate timestamps but different values",
+		},
+	)
+	targetScrapeSampleOutOfOrder = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_sample_out_of_order_total",
+			Help: "Total number of samples rejected due to not being out of the expected order",
+		},
+	)
+	targetScrapeSampleOutOfBounds = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_sample_out_of_bounds_total",
+			Help: "Total number of samples rejected due to timestamp falling outside of the time bounds",
+		},
+	)
+)
    +
+// init registers all scrape metrics with the default Prometheus registry.
+func init() {
+	prometheus.MustRegister(targetIntervalLength)
+	prometheus.MustRegister(targetReloadIntervalLength)
+	prometheus.MustRegister(targetSyncIntervalLength)
+	prometheus.MustRegister(targetScrapePoolSyncsCounter)
+	prometheus.MustRegister(targetScrapeSampleLimit)
+	prometheus.MustRegister(targetScrapeSampleDuplicate)
+	prometheus.MustRegister(targetScrapeSampleOutOfOrder)
+	prometheus.MustRegister(targetScrapeSampleOutOfBounds)
+}
    +
+// scrapePool manages scrapes for sets of targets.
+type scrapePool struct {
+	appendable Appendable
+	logger     log.Logger
+
+	// mtx guards config, client, targets, droppedTargets, and loops below.
+	mtx    sync.RWMutex
+	config *config.ScrapeConfig
+	client *http.Client
+	// Targets and loops must always be synchronized to have the same
+	// set of hashes.
+	targets        map[uint64]*Target
+	droppedTargets []*Target
+	loops          map[uint64]loop
+	// cancel stops all loops created under this pool's context (see stop()).
+	cancel         context.CancelFunc
+
+	// Constructor for new scrape loops. This is settable for testing convenience.
+	newLoop func(*Target, scraper, int, bool, []*config.RelabelConfig) loop
+}
    +
+// maxAheadTime bounds how far into the future appended sample timestamps may lie
+// (enforced by timeLimitAppender, see appender()).
+const maxAheadTime = 10 * time.Minute
+
+// labelsMutator transforms one label set into another (e.g. relabelling).
+type labelsMutator func(labels.Labels) labels.Labels
    +
+// newScrapePool builds a scrapePool for the given scrape config, wiring a
+// shared buffer pool and a default newLoop constructor bound to the pool's
+// cancellable context.
+func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger) *scrapePool {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
+	if err != nil {
+		// Any errors that could occur here should be caught during config validation.
+		level.Error(logger).Log("msg", "Error creating HTTP client", "err", err)
+	}
+
+	// Shared byte-buffer pool for scrape payloads, reused across loops.
+	buffers := pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
+
+	ctx, cancel := context.WithCancel(context.Background())
+	sp := &scrapePool{
+		cancel:     cancel,
+		appendable: app,
+		config:     cfg,
+		client:     client,
+		targets:    map[uint64]*Target{},
+		loops:      map[uint64]loop{},
+		logger:     logger,
+	}
+	sp.newLoop = func(t *Target, s scraper, limit int, honor bool, mrc []*config.RelabelConfig) loop {
+		return newScrapeLoop(
+			ctx,
+			s,
+			log.With(logger, "target", t),
+			buffers,
+			func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, t, honor, mrc) },
+			func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, t) },
+			func() storage.Appender {
+				app, err := app.Appender()
+				if err != nil {
+					// NOTE(review): a failing Appender() panics the scrape loop goroutine;
+					// presumably acceptable upstream — confirm before relying on it.
+					panic(err)
+				}
+				return appender(app, limit)
+			},
+		)
+	}
+
+	return sp
+}
    +
+// stop terminates all scrape loops and returns after they all terminated.
+func (sp *scrapePool) stop() {
+	// Cancel the pool context first so loops observe shutdown promptly.
+	sp.cancel()
+	var wg sync.WaitGroup
+
+	sp.mtx.Lock()
+	defer sp.mtx.Unlock()
+
+	for fp, l := range sp.loops {
+		wg.Add(1)
+
+		// Stop loops concurrently; each stop blocks until its loop has exited.
+		go func(l loop) {
+			l.stop()
+			wg.Done()
+		}(l)
+
+		delete(sp.loops, fp)
+		delete(sp.targets, fp)
+	}
+	wg.Wait()
+}
    +
+// reload the scrape pool with the given scrape configuration. The target state is preserved
+// but all scrape loops are restarted with the new scrape configuration.
+// This method returns after all scrape loops that were stopped have stopped scraping.
+func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
+	start := time.Now()
+
+	sp.mtx.Lock()
+	defer sp.mtx.Unlock()
+
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
+	if err != nil {
+		// Any errors that could occur here should be caught during config validation.
+		level.Error(sp.logger).Log("msg", "Error creating HTTP client", "err", err)
+	}
+	sp.config = cfg
+	sp.client = client
+
+	var (
+		wg       sync.WaitGroup
+		interval = time.Duration(sp.config.ScrapeInterval)
+		timeout  = time.Duration(sp.config.ScrapeTimeout)
+		limit    = int(sp.config.SampleLimit)
+		honor    = sp.config.HonorLabels
+		mrc      = sp.config.MetricRelabelConfigs
+	)
+
+	for fp, oldLoop := range sp.loops {
+		var (
+			t       = sp.targets[fp]
+			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout}
+			newLoop = sp.newLoop(t, s, limit, honor, mrc)
+		)
+		wg.Add(1)
+
+		// Stop the old loop fully before starting its replacement so the two
+		// never append concurrently for the same target.
+		go func(oldLoop, newLoop loop) {
+			oldLoop.stop()
+			wg.Done()
+
+			go newLoop.run(interval, timeout, nil)
+		}(oldLoop, newLoop)
+
+		sp.loops[fp] = newLoop
+	}
+
+	wg.Wait()
+	targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
+		time.Since(start).Seconds(),
+	)
+}
    +
+// Sync converts target groups into actual scrape targets and synchronizes
+// the currently running scraper with the resulting set and returns all scraped and dropped targets.
+func (sp *scrapePool) Sync(tgs []*targetgroup.Group) (tActive []*Target, tDropped []*Target) {
+	start := time.Now()
+
+	var all []*Target
+	sp.mtx.Lock()
+	sp.droppedTargets = []*Target{}
+	for _, tg := range tgs {
+		targets, err := targetsFromGroup(tg, sp.config)
+		if err != nil {
+			level.Error(sp.logger).Log("msg", "creating targets failed", "err", err)
+			continue
+		}
+		for _, t := range targets {
+			// Targets with labels after relabelling are scraped; targets whose
+			// labels were all dropped but that had discovered labels are recorded
+			// as dropped (for the service-discovery UI).
+			if t.Labels().Len() > 0 {
+				all = append(all, t)
+			} else if t.DiscoveredLabels().Len() > 0 {
+				sp.droppedTargets = append(sp.droppedTargets, t)
+			}
+		}
+	}
+	sp.mtx.Unlock()
+	// sync deduplicates, starts new loops, and stops removed ones.
+	sp.sync(all)
+
+	targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
+		time.Since(start).Seconds(),
+	)
+	targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
+
+	sp.mtx.RLock()
+	for _, t := range sp.targets {
+		tActive = append(tActive, t)
+	}
+	tDropped = sp.droppedTargets
+	sp.mtx.RUnlock()
+
+	return tActive, tDropped
+}
    +
+// sync takes a list of potentially duplicated targets, deduplicates them, starts
+// scrape loops for new targets, and stops scrape loops for disappeared targets.
+// It returns after all stopped scrape loops terminated.
+func (sp *scrapePool) sync(targets []*Target) {
+	sp.mtx.Lock()
+	defer sp.mtx.Unlock()
+
+	var (
+		uniqueTargets = map[uint64]struct{}{}
+		interval      = time.Duration(sp.config.ScrapeInterval)
+		timeout       = time.Duration(sp.config.ScrapeTimeout)
+		limit         = int(sp.config.SampleLimit)
+		honor         = sp.config.HonorLabels
+		mrc           = sp.config.MetricRelabelConfigs
+	)
+
+	for _, t := range targets {
+		t := t // Shadow the loop variable for capture in the loop below (pre-Go-1.22 semantics).
+		hash := t.hash()
+		uniqueTargets[hash] = struct{}{}
+
+		if _, ok := sp.targets[hash]; !ok {
+			// New target: create and start a scrape loop for it.
+			s := &targetScraper{Target: t, client: sp.client, timeout: timeout}
+			l := sp.newLoop(t, s, limit, honor, mrc)
+
+			sp.targets[hash] = t
+			sp.loops[hash] = l
+
+			go l.run(interval, timeout, nil)
+		} else {
+			// Need to keep the most updated labels information
+			// for displaying it in the Service Discovery web page.
+			sp.targets[hash].SetDiscoveredLabels(t.DiscoveredLabels())
+		}
+	}
+
+	var wg sync.WaitGroup
+
+	// Stop and remove old targets and scraper loops.
+	for hash := range sp.targets {
+		if _, ok := uniqueTargets[hash]; !ok {
+			wg.Add(1)
+			go func(l loop) {
+				l.stop()
+				wg.Done()
+			}(sp.loops[hash])
+
+			delete(sp.loops, hash)
+			delete(sp.targets, hash)
+		}
+	}
+
+	// Wait for all potentially stopped scrapers to terminate.
+	// This covers the case of flapping targets. If the server is under high load, a new scraper
+	// may be active and tries to insert. The old scraper that didn't terminate yet could still
+	// be inserting a previous sample set.
+	wg.Wait()
+}
    +
+// mutateSampleLabels merges target labels into a scraped sample's label set.
+// With honor set, existing sample labels win over target labels; otherwise
+// conflicting sample labels are preserved under the exported_ prefix. Empty
+// labels are dropped, then metric relabel configs (rc) are applied.
+func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*config.RelabelConfig) labels.Labels {
+	lb := labels.NewBuilder(lset)
+
+	if honor {
+		for _, l := range target.Labels() {
+			if !lset.Has(l.Name) {
+				lb.Set(l.Name, l.Value)
+			}
+		}
+	} else {
+		for _, l := range target.Labels() {
+			lv := lset.Get(l.Name)
+			if lv != "" {
+				// Preserve the scraped value under the exported_ prefix.
+				lb.Set(model.ExportedLabelPrefix+l.Name, lv)
+			}
+			lb.Set(l.Name, l.Value)
+		}
+	}
+
+	// Labels with empty values are semantically absent; remove them.
+	for _, l := range lb.Labels() {
+		if l.Value == "" {
+			lb.Del(l.Name)
+		}
+	}
+
+	res := lb.Labels()
+
+	if len(rc) > 0 {
+		res = relabel.Process(res, rc...)
+	}
+
+	return res
+}
    +
+// mutateReportSampleLabels attaches target labels to synthetic report samples
+// (e.g. up, scrape_duration_seconds), moving conflicting scraped labels under
+// the exported_ prefix. Unlike mutateSampleLabels, no relabelling is applied.
+func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels {
+	lb := labels.NewBuilder(lset)
+
+	for _, l := range target.Labels() {
+		lv := lset.Get(l.Name)
+		if lv != "" {
+			lb.Set(model.ExportedLabelPrefix+l.Name, lv)
+		}
+		lb.Set(l.Name, l.Value)
+	}
+
+	return lb.Labels()
+}
    +
+// appender returns an appender for ingested samples from the target.
+// It always caps sample timestamps at maxAheadTime into the future, and
+// additionally enforces the per-scrape sample limit when limit > 0.
+func appender(app storage.Appender, limit int) storage.Appender {
+	app = &timeLimitAppender{
+		Appender: app,
+		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
+	}
+
+	// The limit is applied after metrics are potentially dropped via relabeling.
+	if limit > 0 {
+		app = &limitAppender{
+			Appender: app,
+			limit:    limit,
+		}
+	}
+	return app
+}
    +
+// A scraper retrieves samples and accepts a status report at the end.
+type scraper interface {
+	// scrape fetches one sample payload into w, honoring ctx cancellation.
+	scrape(ctx context.Context, w io.Writer) error
+	// report records the outcome of a scrape attempt.
+	report(start time.Time, dur time.Duration, err error)
+	// offset returns the initial delay before the first scrape within interval.
+	offset(interval time.Duration) time.Duration
+}
    +
+// targetScraper implements the scraper interface for a target.
+type targetScraper struct {
+	*Target
+
+	client  *http.Client
+	// req is built once on the first scrape and reused afterwards.
+	req     *http.Request
+	timeout time.Duration
+
+	// gzipr and buf are lazily created and reused across scrapes of
+	// gzip-encoded responses to avoid per-scrape allocations.
+	gzipr *gzip.Reader
+	buf   *bufio.Reader
+}
    +
+// acceptHeader prefers the text exposition format v0.0.4 but accepts anything.
+const acceptHeader = `text/plain;version=0.0.4;q=1,*/*;q=0.1`
+
+// userAgentHeader identifies this Prometheus build in scrape requests.
+var userAgentHeader = fmt.Sprintf("Prometheus/%s", version.Version)
    +
+// scrape performs one HTTP scrape of the target and writes the (transparently
+// gunzipped) response body to w. The request object and gzip reader are cached
+// on the scraper and reused between calls.
+func (s *targetScraper) scrape(ctx context.Context, w io.Writer) error {
+	if s.req == nil {
+		req, err := http.NewRequest("GET", s.URL().String(), nil)
+		if err != nil {
+			return err
+		}
+		req.Header.Add("Accept", acceptHeader)
+		req.Header.Add("Accept-Encoding", "gzip")
+		req.Header.Set("User-Agent", userAgentHeader)
+		// Advertise the scrape timeout so well-behaved exporters can self-limit.
+		req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", fmt.Sprintf("%f", s.timeout.Seconds()))
+
+		s.req = req
+	}
+
+	resp, err := ctxhttp.Do(ctx, s.client, s.req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("server returned HTTP status %s", resp.Status)
+	}
+
+	// Fast path: body is not gzip-compressed, stream it straight through.
+	if resp.Header.Get("Content-Encoding") != "gzip" {
+		_, err = io.Copy(w, resp.Body)
+		return err
+	}
+
+	// Lazily create the gzip reader on first use, then Reset it on later scrapes.
+	if s.gzipr == nil {
+		s.buf = bufio.NewReader(resp.Body)
+		s.gzipr, err = gzip.NewReader(s.buf)
+		if err != nil {
+			return err
+		}
+	} else {
+		s.buf.Reset(resp.Body)
+		s.gzipr.Reset(s.buf)
+	}
+
+	_, err = io.Copy(w, s.gzipr)
+	s.gzipr.Close()
+	return err
+}
    +
+// A loop can run and be stopped again. It must not be reused after it was stopped.
+type loop interface {
+	run(interval, timeout time.Duration, errc chan<- error)
+	// stop blocks until the loop has fully terminated.
+	stop()
+}
    +
+// cacheEntry maps a scraped metric string to its storage reference and
+// parsed label set; lastIter tracks the most recent scrape iteration that
+// saw it (used for eviction in iterDone).
+type cacheEntry struct {
+	ref      uint64
+	lastIter uint64
+	hash     uint64
+	lset     labels.Labels
+}
    +
+// scrapeLoop drives repeated scrapes of a single target, parsing and
+// appending samples through the configured mutators and appender factory.
+type scrapeLoop struct {
+	scraper        scraper
+	l              log.Logger
+	cache          *scrapeCache
+	// lastScrapeSize seeds the buffer size for the next scrape.
+	lastScrapeSize int
+	buffers        *pool.Pool
+
+	appender            func() storage.Appender
+	sampleMutator       labelsMutator
+	reportSampleMutator labelsMutator
+
+	// ctx is the parent (pool) context; scrapeCtx cancels only this loop.
+	ctx       context.Context
+	scrapeCtx context.Context
+	cancel    func()
+	// stopped is closed when run() has fully exited.
+	stopped   chan struct{}
+}
    +
+// scrapeCache tracks mappings of exposed metric strings to label sets and
+// storage references. Additionally, it tracks staleness of series between
+// scrapes.
+type scrapeCache struct {
+	iter uint64 // Current scrape iteration.
+
+	// Parsed string to an entry with information about the actual label set
+	// and its storage reference.
+	entries map[string]*cacheEntry
+
+	// Cache of dropped metric strings and their iteration. The iteration must
+	// be a pointer so we can update it without setting a new entry with an unsafe
+	// string in addDropped().
+	dropped map[string]*uint64
+
+	// seriesCur and seriesPrev store the labels of series that were seen
+	// in the current and previous scrape.
+	// We hold two maps and swap them out to save allocations.
+	seriesCur  map[uint64]labels.Labels
+	seriesPrev map[uint64]labels.Labels
+}
    +
+// newScrapeCache returns an empty scrapeCache with all maps initialized.
+func newScrapeCache() *scrapeCache {
+	return &scrapeCache{
+		entries:    map[string]*cacheEntry{},
+		dropped:    map[string]*uint64{},
+		seriesCur:  map[uint64]labels.Labels{},
+		seriesPrev: map[uint64]labels.Labels{},
+	}
+}
    +
+// iterDone finishes one scrape iteration: it evicts entries not seen in the
+// last two iterations, swaps the current/previous series maps for staleness
+// tracking, and advances the iteration counter.
+func (c *scrapeCache) iterDone() {
+	// refCache and lsetCache may grow over time through series churn
+	// or multiple string representations of the same metric. Clean up entries
+	// that haven't appeared in the last scrape.
+	for s, e := range c.entries {
+		if c.iter-e.lastIter > 2 {
+			delete(c.entries, s)
+		}
+	}
+	for s, iter := range c.dropped {
+		if c.iter-*iter > 2 {
+			delete(c.dropped, s)
+		}
+	}
+
+	// Swap current and previous series.
+	c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev
+
+	// We have to delete every single key in the map.
+	for k := range c.seriesCur {
+		delete(c.seriesCur, k)
+	}
+
+	c.iter++
+}
    +
+// get looks up the cache entry for a metric string, marking it as seen in the
+// current iteration so it survives eviction.
+func (c *scrapeCache) get(met string) (*cacheEntry, bool) {
+	e, ok := c.entries[met]
+	if !ok {
+		return nil, false
+	}
+	e.lastIter = c.iter
+	return e, true
+}
    +
+// addRef caches a metric string's storage reference and label set.
+// A zero ref is not cacheable and is ignored.
+func (c *scrapeCache) addRef(met string, ref uint64, lset labels.Labels, hash uint64) {
+	if ref == 0 {
+		return
+	}
+	c.entries[met] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
+}
    +
+// addDropped records met as dropped (by relabelling) in the current iteration.
+func (c *scrapeCache) addDropped(met string) {
+	iter := c.iter
+	c.dropped[met] = &iter
+}
    +
+// getDropped reports whether met was previously dropped, refreshing its
+// iteration stamp (through the stored pointer) so it is not evicted.
+func (c *scrapeCache) getDropped(met string) bool {
+	iterp, ok := c.dropped[met]
+	if ok {
+		*iterp = c.iter
+	}
+	return ok
+}
    +
+// trackStaleness marks the series (by hash) as seen in the current scrape.
+func (c *scrapeCache) trackStaleness(hash uint64, lset labels.Labels) {
+	c.seriesCur[hash] = lset
+}
    +
+// forEachStale calls f for each series seen in the previous scrape but not the
+// current one. Iteration stops early if f returns false.
+func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
+	for h, lset := range c.seriesPrev {
+		if _, ok := c.seriesCur[h]; !ok {
+			if !f(lset) {
+				break
+			}
+		}
+	}
+}
    +
+// newScrapeLoop assembles a scrapeLoop, defaulting the logger and buffer pool
+// when nil, and derives a per-loop cancellable context from ctx.
+func newScrapeLoop(ctx context.Context,
+	sc scraper,
+	l log.Logger,
+	buffers *pool.Pool,
+	sampleMutator labelsMutator,
+	reportSampleMutator labelsMutator,
+	appender func() storage.Appender,
+) *scrapeLoop {
+	if l == nil {
+		l = log.NewNopLogger()
+	}
+	if buffers == nil {
+		buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
+	}
+	sl := &scrapeLoop{
+		scraper:             sc,
+		buffers:             buffers,
+		cache:               newScrapeCache(),
+		appender:            appender,
+		sampleMutator:       sampleMutator,
+		reportSampleMutator: reportSampleMutator,
+		stopped:             make(chan struct{}),
+		l:                   l,
+		ctx:                 ctx,
+	}
+	// scrapeCtx lets stop() cancel just this loop without touching the parent ctx.
+	sl.scrapeCtx, sl.cancel = context.WithCancel(ctx)
+
+	return sl
+}
    +
+// run executes the scrape loop: after an initial jitter offset it scrapes the
+// target every interval, appends the results, and reports scrape health, until
+// either the parent context (hard stop) or scrapeCtx (graceful stop, followed
+// by end-of-run staleness handling) is cancelled.
+func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
+	select {
+	case <-time.After(sl.scraper.offset(interval)):
+		// Continue after a scraping offset.
+	case <-sl.scrapeCtx.Done():
+		close(sl.stopped)
+		return
+	}
+
+	var last time.Time
+
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+
+	// NOTE(review): this buffer is shadowed by the per-iteration buf below, so
+	// only the Reset() at the top of the loop touches it; it appears vestigial.
+	buf := bytes.NewBuffer(make([]byte, 0, 16000))
+
+mainLoop:
+	for {
+		buf.Reset()
+		select {
+		case <-sl.ctx.Done():
+			close(sl.stopped)
+			return
+		case <-sl.scrapeCtx.Done():
+			break mainLoop
+		default:
+		}
+
+		var (
+			start             = time.Now()
+			scrapeCtx, cancel = context.WithTimeout(sl.ctx, timeout)
+		)
+
+		// Only record after the first scrape.
+		if !last.IsZero() {
+			targetIntervalLength.WithLabelValues(interval.String()).Observe(
+				time.Since(last).Seconds(),
+			)
+		}
+
+		// Borrow a buffer sized from the previous scrape to avoid regrowth.
+		b := sl.buffers.Get(sl.lastScrapeSize).([]byte)
+		buf := bytes.NewBuffer(b)
+
+		scrapeErr := sl.scraper.scrape(scrapeCtx, buf)
+		cancel()
+
+		if scrapeErr == nil {
+			b = buf.Bytes()
+			// NOTE: There were issues with misbehaving clients in the past
+			// that occasionally returned empty results. We don't want those
+			// to falsely reset our buffer size.
+			if len(b) > 0 {
+				sl.lastScrapeSize = len(b)
+			}
+		} else {
+			level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr.Error())
+			if errc != nil {
+				errc <- scrapeErr
+			}
+		}
+
+		// A failed scrape is the same as an empty scrape,
+		// we still call sl.append to trigger stale markers.
+		total, added, appErr := sl.append(b, start)
+		if appErr != nil {
+			level.Warn(sl.l).Log("msg", "append failed", "err", appErr)
+			// The append failed, probably due to a parse error or sample limit.
+			// Call sl.append again with an empty scrape to trigger stale markers.
+			if _, _, err := sl.append([]byte{}, start); err != nil {
+				level.Warn(sl.l).Log("msg", "append failed", "err", err)
+			}
+		}
+
+		// Return the buffer to the pool for reuse by the next scrape.
+		sl.buffers.Put(b)
+
+		if scrapeErr == nil {
+			scrapeErr = appErr
+		}
+
+		sl.report(start, time.Since(start), total, added, scrapeErr)
+		last = start
+
+		select {
+		case <-sl.ctx.Done():
+			close(sl.stopped)
+			return
+		case <-sl.scrapeCtx.Done():
+			break mainLoop
+		case <-ticker.C:
+		}
+	}
+
+	close(sl.stopped)
+
+	// Graceful stop: emit staleness markers for the series this loop produced.
+	sl.endOfRunStaleness(last, ticker, interval)
+}
    +
// endOfRunStaleness writes stale markers for all series of a stopped scrape
// loop after waiting just over two scrape intervals, giving a recreated
// target time to take over the series first.
func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, interval time.Duration) {
	// Scraping has stopped. We want to write stale markers but
	// the target may be recreated, so we wait just over 2 scrape intervals
	// before creating them.
	// If the context is cancelled, we presume the server is shutting down
	// and will restart where it was. We do not attempt to write stale markers
	// in this case.

	if last.IsZero() {
		// There never was a scrape, so there will be no stale markers.
		return
	}

	// Wait for when the next scrape would have been, record its timestamp.
	var staleTime time.Time
	select {
	case <-sl.ctx.Done():
		return
	case <-ticker.C:
		staleTime = time.Now()
	}

	// Wait for when the next scrape would have been, if the target was recreated
	// samples should have been ingested by now.
	select {
	case <-sl.ctx.Done():
		return
	case <-ticker.C:
	}

	// Wait for an extra 10% of the interval, just to be safe.
	select {
	case <-sl.ctx.Done():
		return
	case <-time.After(interval / 10):
	}

	// Call sl.append again with an empty scrape to trigger stale markers.
	// If the target has since been recreated and scraped, the
	// stale markers will be out of order and ignored.
	if _, _, err := sl.append([]byte{}, staleTime); err != nil {
		level.Error(sl.l).Log("msg", "stale append failed", "err", err)
	}
	if err := sl.reportStale(staleTime); err != nil {
		level.Error(sl.l).Log("msg", "stale report failed", "err", err)
	}
}
    +
// Stop the scraping. May still write data and stale markers after it has
// returned. Cancel the context to stop all writes.
func (sl *scrapeLoop) stop() {
	// Cancel only the scrape sub-context (so end-of-run staleness handling in
	// run can still execute), then block until run has actually exited.
	sl.cancel()
	<-sl.stopped
}
    +
// sample is a single appended value together with its label set.
type sample struct {
	metric labels.Labels
	t      int64   // timestamp in milliseconds
	v      float64 // sample value
}
    +
    +type samples []sample
    +
    +func (s samples) Len() int      { return len(s) }
    +func (s samples) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
    +
    +func (s samples) Less(i, j int) bool {
    +	d := labels.Compare(s[i].metric, s[j].metric)
    +	if d < 0 {
    +		return true
    +	} else if d > 0 {
    +		return false
    +	}
    +	return s[i].t < s[j].t
    +}
    +
// append parses the scraped metrics in b and appends them to storage in one
// transaction, using ts as the default timestamp for samples that carry none.
// It returns the total number of parsed samples, the number actually added
// (post-relabeling), and the first fatal error. Series present in the
// previous scrape but missing from this one get stale markers. On any fatal
// error the whole append is rolled back.
func (sl *scrapeLoop) append(b []byte, ts time.Time) (total, added int, err error) {
	var (
		app            = sl.appender()
		p              = textparse.New(b)
		defTime        = timestamp.FromTime(ts)
		numOutOfOrder  = 0
		numDuplicates  = 0
		numOutOfBounds = 0
	)
	var sampleLimitErr error

loop:
	for p.Next() {
		total++

		t := defTime
		met, tp, v := p.At()
		if tp != nil {
			t = *tp
		}

		// Fast paths: the cache knows this series was dropped by relabeling,
		// or already has a storage reference for it. yoloString avoids a
		// copy for the map lookup only.
		if sl.cache.getDropped(yoloString(met)) {
			continue
		}
		ce, ok := sl.cache.get(yoloString(met))
		if ok {
			switch err = app.AddFast(ce.lset, ce.ref, t, v); err {
			case nil:
				if tp == nil {
					sl.cache.trackStaleness(ce.hash, ce.lset)
				}
			case storage.ErrNotFound:
				// Stale reference; fall through to the slow Add path below.
				ok = false
			case storage.ErrOutOfOrderSample:
				numOutOfOrder++
				level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
				targetScrapeSampleOutOfOrder.Inc()
				continue
			case storage.ErrDuplicateSampleForTimestamp:
				numDuplicates++
				level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
				targetScrapeSampleDuplicate.Inc()
				continue
			case storage.ErrOutOfBounds:
				numOutOfBounds++
				level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
				targetScrapeSampleOutOfBounds.Inc()
				continue
			case errSampleLimit:
				// Keep on parsing output if we hit the limit, so we report the correct
				// total number of samples scraped.
				sampleLimitErr = err
				added++
				continue
			default:
				break loop
			}
		}
		if !ok {
			var lset labels.Labels

			mets := p.Metric(&lset)
			hash := lset.Hash()

			// Hash label set as it is seen local to the target. Then add target labels
			// and relabeling and store the final label set.
			lset = sl.sampleMutator(lset)

			// The label set may be set to nil to indicate dropping.
			if lset == nil {
				sl.cache.addDropped(mets)
				continue
			}

			var ref uint64
			ref, err = app.Add(lset, t, v)
			// TODO(fabxc): also add a dropped-cache?
			switch err {
			case nil:
			case storage.ErrOutOfOrderSample:
				err = nil
				numOutOfOrder++
				level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
				targetScrapeSampleOutOfOrder.Inc()
				continue
			case storage.ErrDuplicateSampleForTimestamp:
				err = nil
				numDuplicates++
				level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
				targetScrapeSampleDuplicate.Inc()
				continue
			case storage.ErrOutOfBounds:
				err = nil
				numOutOfBounds++
				level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
				targetScrapeSampleOutOfBounds.Inc()
				continue
			case errSampleLimit:
				sampleLimitErr = err
				added++
				continue
			default:
				level.Debug(sl.l).Log("msg", "unexpected error", "series", string(met), "err", err)
				break loop
			}
			if tp == nil {
				// Bypass staleness logic if there is an explicit timestamp.
				sl.cache.trackStaleness(hash, lset)
			}
			sl.cache.addRef(mets, ref, lset, hash)
		}
		added++
	}
	if err == nil {
		err = p.Err()
	}
	if sampleLimitErr != nil {
		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
		targetScrapeSampleLimit.Inc()
	}
	if numOutOfOrder > 0 {
		level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", numOutOfOrder)
	}
	if numDuplicates > 0 {
		level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", numDuplicates)
	}
	if numOutOfBounds > 0 {
		level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", numOutOfBounds)
	}
	if err == nil {
		sl.cache.forEachStale(func(lset labels.Labels) bool {
			// Series no longer exposed, mark it stale.
			_, err = app.Add(lset, defTime, math.Float64frombits(value.StaleNaN))
			switch err {
			case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
				// Do not count these in logging, as this is expected if a target
				// goes away and comes back again with a new scrape loop.
				err = nil
			}
			return err == nil
		})
	}
	if err != nil {
		app.Rollback()
		return total, added, err
	}
	if err := app.Commit(); err != nil {
		return total, added, err
	}

	// Rotate the cache's current/previous series maps for the next scrape.
	sl.cache.iterDone()

	return total, added, nil
}
    +
// yoloString converts a byte slice to a string without copying. The returned
// string aliases b's backing array: b must not be mutated while the string is
// in use, and the string must not be retained beyond the lifetime of b.
// Only safe for transient uses such as map lookups.
func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}
    +
// Cache keys for the synthetic report series appended after every scrape.
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache; the suffix is stripped again in
// addReportSample before building the actual metric name.
const (
	scrapeHealthMetricName       = "up" + "\xff"
	scrapeDurationMetricName     = "scrape_duration_seconds" + "\xff"
	scrapeSamplesMetricName      = "scrape_samples_scraped" + "\xff"
	samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff"
)
    +
    +func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, appended int, err error) error {
    +	sl.scraper.report(start, duration, err)
    +
    +	ts := timestamp.FromTime(start)
    +
    +	var health float64
    +	if err == nil {
    +		health = 1
    +	}
    +	app := sl.appender()
    +
    +	if err := sl.addReportSample(app, scrapeHealthMetricName, ts, health); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	if err := sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds()); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	if err := sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped)); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	if err := sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(appended)); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	return app.Commit()
    +}
    +
    +func (sl *scrapeLoop) reportStale(start time.Time) error {
    +	ts := timestamp.FromTime(start)
    +	app := sl.appender()
    +
    +	stale := math.Float64frombits(value.StaleNaN)
    +
    +	if err := sl.addReportSample(app, scrapeHealthMetricName, ts, stale); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	if err := sl.addReportSample(app, scrapeDurationMetricName, ts, stale); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	if err := sl.addReportSample(app, scrapeSamplesMetricName, ts, stale); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	if err := sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale); err != nil {
    +		app.Rollback()
    +		return err
    +	}
    +	return app.Commit()
    +}
    +
// addReportSample appends a single report sample for the cache key s
// (a report metric name with the \xff suffix) with timestamp t and value v.
// It tries the cached storage reference first and falls back to a full Add,
// caching the new reference on success. Out-of-order and duplicate errors are
// deliberately ignored: they are expected when a target goes away and comes
// back with a new scrape loop.
func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error {
	ce, ok := sl.cache.get(s)
	if ok {
		err := app.AddFast(ce.lset, ce.ref, t, v)
		switch err {
		case nil:
			return nil
		case storage.ErrNotFound:
			// Try an Add.
		case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
			// Do not log here, as this is expected if a target goes away and comes back
			// again with a new scrape loop.
			return nil
		default:
			return err
		}
	}
	lset := labels.Labels{
		// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
		// with scraped metrics in the cache.
		// We have to drop it when building the actual metric.
		labels.Label{Name: labels.MetricName, Value: s[:len(s)-1]},
	}

	hash := lset.Hash()
	lset = sl.reportSampleMutator(lset)

	ref, err := app.Add(lset, t, v)
	switch err {
	case nil:
		sl.cache.addRef(s, ref, lset, hash)
		return nil
	case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
		return nil
	default:
		return err
	}
}
    diff --git a/src/prometheus/scrape/scrape_test.go b/src/prometheus/scrape/scrape_test.go
    new file mode 100644
    index 0000000..ea2a97b
    --- /dev/null
    +++ b/src/prometheus/scrape/scrape_test.go
    @@ -0,0 +1,1265 @@
    +// Copyright 2016 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package scrape
    +
    +import (
    +	"bytes"
    +	"context"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"math"
    +	"net/http"
    +	"net/http/httptest"
    +	"net/url"
    +	"reflect"
    +	"strings"
    +	"sync"
    +	"testing"
    +	"time"
    +
    +	"github.com/prometheus/common/model"
    +	"github.com/stretchr/testify/require"
    +
    +	dto "github.com/prometheus/client_model/go"
    +
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/discovery/targetgroup"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/pkg/timestamp"
    +	"github.com/prometheus/prometheus/pkg/value"
    +	"github.com/prometheus/prometheus/storage"
    +	"github.com/prometheus/prometheus/util/testutil"
    +)
    +
    +func TestNewScrapePool(t *testing.T) {
    +	var (
    +		app = &nopAppendable{}
    +		cfg = &config.ScrapeConfig{}
    +		sp  = newScrapePool(cfg, app, nil)
    +	)
    +
    +	if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
    +		t.Fatalf("Wrong sample appender")
    +	}
    +	if sp.config != cfg {
    +		t.Fatalf("Wrong scrape config")
    +	}
    +	if sp.newLoop == nil {
    +		t.Fatalf("newLoop function not initialized")
    +	}
    +}
    +
// TestDroppedTargetsList checks that targets removed by relabeling end up in
// the pool's droppedTargets list exactly once (even across repeated Syncs)
// and retain their discovered labels.
func TestDroppedTargetsList(t *testing.T) {
	var (
		app = &nopAppendable{}
		cfg = &config.ScrapeConfig{
			JobName:        "dropMe",
			ScrapeInterval: model.Duration(1),
			RelabelConfigs: []*config.RelabelConfig{
				{
					Action:       config.RelabelDrop,
					Regex:        mustNewRegexp("dropMe"),
					SourceLabels: model.LabelNames{"job"},
				},
			},
		}
		tgs = []*targetgroup.Group{
			{
				Targets: []model.LabelSet{
					model.LabelSet{model.AddressLabel: "127.0.0.1:9090"},
				},
			},
		}
		sp                     = newScrapePool(cfg, app, nil)
		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __metrics_path__=\"\", __scheme__=\"\", job=\"dropMe\"}"
		expectedLength         = 1
	)
	// Sync twice to verify dropped targets are not duplicated.
	sp.Sync(tgs)
	sp.Sync(tgs)
	if len(sp.droppedTargets) != expectedLength {
		t.Fatalf("Length of dropped targets exceeded expected length, expected %v, got %v", expectedLength, len(sp.droppedTargets))
	}
	if sp.droppedTargets[0].DiscoveredLabels().String() != expectedLabelSetString {
		t.Fatalf("Got %v, expected %v", sp.droppedTargets[0].DiscoveredLabels().String(), expectedLabelSetString)
	}
}
    +
// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
// even when new labels don't affect the target `hash`.
func TestDiscoveredLabelsUpdate(t *testing.T) {

	sp := &scrapePool{}
	// These are used when syncing so need this to avoid a panic.
	sp.config = &config.ScrapeConfig{
		ScrapeInterval: model.Duration(1),
		ScrapeTimeout:  model.Duration(1),
	}
	sp.targets = make(map[uint64]*Target)
	t1 := &Target{
		discoveredLabels: labels.Labels{
			labels.Label{
				Name:  "label",
				Value: "name",
			},
		},
	}
	sp.targets[t1.hash()] = t1

	// t2 has the same hash as t1 (hash ignores discovered labels) but
	// different discovered labels; sync must refresh them in place.
	t2 := &Target{
		discoveredLabels: labels.Labels{
			labels.Label{
				Name:  "labelNew",
				Value: "nameNew",
			},
		},
	}
	sp.sync([]*Target{t2})

	testutil.Equals(t, t2.DiscoveredLabels(), sp.targets[t1.hash()].DiscoveredLabels())
}
    +
// testLoop is a loop implementation for tests that simply delegates run and
// stop to configurable function fields.
type testLoop struct {
	startFunc func(interval, timeout time.Duration, errc chan<- error)
	stopFunc  func()
}

func (l *testLoop) run(interval, timeout time.Duration, errc chan<- error) {
	l.startFunc(interval, timeout, errc)
}

func (l *testLoop) stop() {
	l.stopFunc()
}
    +
// TestScrapePoolStop verifies that scrapePool.stop stops every loop, waits
// for all of them to finish, and clears the target and loop maps.
func TestScrapePoolStop(t *testing.T) {
	sp := &scrapePool{
		targets: map[uint64]*Target{},
		loops:   map[uint64]loop{},
		cancel:  func() {},
	}
	var mtx sync.Mutex
	stopped := map[uint64]bool{}
	numTargets := 20

	// Stopping the scrape pool must call stop() on all scrape loops,
	// clean them and the respective targets up. It must wait until each loop's
	// stop function returned before returning itself.

	for i := 0; i < numTargets; i++ {
		t := &Target{
			labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
		}
		l := &testLoop{}
		// NOTE(review): this closure captures the loop variable i, which has
		// reached numTargets by the time stop() runs, so every loop sleeps the
		// maximum stagger — presumably acceptable for the timing assertion
		// below, but confirm against upstream intent.
		l.stopFunc = func() {
			time.Sleep(time.Duration(i*20) * time.Millisecond)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.targets[t.hash()] = t
		sp.loops[t.hash()] = l
	}

	done := make(chan struct{})
	stopTime := time.Now()

	go func() {
		sp.stop()
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("scrapeLoop.stop() did not return as expected")
	case <-done:
		// This should have taken at least as long as the last target slept.
		if time.Since(stopTime) < time.Duration(numTargets*20)*time.Millisecond {
			t.Fatalf("scrapeLoop.stop() exited before all targets stopped")
		}
	}

	mtx.Lock()
	if len(stopped) != numTargets {
		t.Fatalf("Expected 20 stopped loops, got %d", len(stopped))
	}
	mtx.Unlock()

	if len(sp.targets) > 0 {
		t.Fatalf("Targets were not cleared on stopping: %d left", len(sp.targets))
	}
	if len(sp.loops) > 0 {
		t.Fatalf("Loops were not cleared on stopping: %d left", len(sp.loops))
	}
}
    +
// TestScrapePoolReload verifies that reloading a scrape pool with a new
// configuration stops all existing loops (waiting for each to finish before
// its successor starts), starts replacement loops with the new interval and
// timeout, and leaves the target set untouched.
func TestScrapePoolReload(t *testing.T) {
	var mtx sync.Mutex
	numTargets := 20

	stopped := map[uint64]bool{}

	reloadCfg := &config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}
	// On starting to run, new loops created on reload check whether their preceding
	// equivalents have been stopped.
	newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*config.RelabelConfig) loop {
		l := &testLoop{}
		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
			if interval != 3*time.Second {
				t.Errorf("Expected scrape interval %d but got %d", 3*time.Second, interval)
			}
			if timeout != 2*time.Second {
				t.Errorf("Expected scrape timeout %d but got %d", 2*time.Second, timeout)
			}
			mtx.Lock()
			if !stopped[s.(*targetScraper).hash()] {
				t.Errorf("Scrape loop for %v not stopped yet", s.(*targetScraper))
			}
			mtx.Unlock()
		}
		return l
	}
	sp := &scrapePool{
		appendable: &nopAppendable{},
		targets:    map[uint64]*Target{},
		loops:      map[uint64]loop{},
		newLoop:    newLoop,
		logger:     nil,
	}

	// Reloading a scrape pool with a new scrape configuration must stop all scrape
	// loops and start new ones. A new loop must not be started before the preceding
	// one terminated.

	for i := 0; i < numTargets; i++ {
		t := &Target{
			labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
		}
		l := &testLoop{}
		// NOTE(review): stopFunc captures the loop variable i, which has its
		// final value once reload() runs — TODO confirm the stagger is still
		// intended here.
		l.stopFunc = func() {
			time.Sleep(time.Duration(i*20) * time.Millisecond)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.targets[t.hash()] = t
		sp.loops[t.hash()] = l
	}
	done := make(chan struct{})

	beforeTargets := map[uint64]*Target{}
	for h, t := range sp.targets {
		beforeTargets[h] = t
	}

	reloadTime := time.Now()

	go func() {
		sp.reload(reloadCfg)
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("scrapeLoop.reload() did not return as expected")
	case <-done:
		// This should have taken at least as long as the last target slept.
		if time.Since(reloadTime) < time.Duration(numTargets*20)*time.Millisecond {
			t.Fatalf("scrapeLoop.stop() exited before all targets stopped")
		}
	}

	mtx.Lock()
	if len(stopped) != numTargets {
		t.Fatalf("Expected 20 stopped loops, got %d", len(stopped))
	}
	mtx.Unlock()

	if !reflect.DeepEqual(sp.targets, beforeTargets) {
		t.Fatalf("Reloading affected target states unexpectedly")
	}
	if len(sp.loops) != numTargets {
		t.Fatalf("Expected %d loops after reload but got %d", numTargets, len(sp.loops))
	}
}
    +
    +func TestScrapePoolAppender(t *testing.T) {
    +	cfg := &config.ScrapeConfig{}
    +	app := &nopAppendable{}
    +	sp := newScrapePool(cfg, app, nil)
    +
    +	loop := sp.newLoop(nil, nil, 0, false, nil)
    +	appl, ok := loop.(*scrapeLoop)
    +	if !ok {
    +		t.Fatalf("Expected scrapeLoop but got %T", loop)
    +	}
    +	wrapped := appl.appender()
    +
    +	tl, ok := wrapped.(*timeLimitAppender)
    +	if !ok {
    +		t.Fatalf("Expected timeLimitAppender but got %T", wrapped)
    +	}
    +	if _, ok := tl.Appender.(nopAppender); !ok {
    +		t.Fatalf("Expected base appender but got %T", tl.Appender)
    +	}
    +
    +	loop = sp.newLoop(nil, nil, 100, false, nil)
    +	appl, ok = loop.(*scrapeLoop)
    +	if !ok {
    +		t.Fatalf("Expected scrapeLoop but got %T", loop)
    +	}
    +	wrapped = appl.appender()
    +
    +	sl, ok := wrapped.(*limitAppender)
    +	if !ok {
    +		t.Fatalf("Expected limitAppender but got %T", wrapped)
    +	}
    +	tl, ok = sl.Appender.(*timeLimitAppender)
    +	if !ok {
    +		t.Fatalf("Expected limitAppender but got %T", sl.Appender)
    +	}
    +	if _, ok := tl.Appender.(nopAppender); !ok {
    +		t.Fatalf("Expected base appender but got %T", tl.Appender)
    +	}
    +}
    +
    +func TestScrapePoolRaces(t *testing.T) {
    +	interval, _ := model.ParseDuration("500ms")
    +	timeout, _ := model.ParseDuration("1s")
    +	newConfig := func() *config.ScrapeConfig {
    +		return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
    +	}
    +	sp := newScrapePool(newConfig(), &nopAppendable{}, nil)
    +	tgts := []*targetgroup.Group{
    +		&targetgroup.Group{
    +			Targets: []model.LabelSet{
    +				model.LabelSet{model.AddressLabel: "127.0.0.1:9090"},
    +				model.LabelSet{model.AddressLabel: "127.0.0.2:9090"},
    +				model.LabelSet{model.AddressLabel: "127.0.0.3:9090"},
    +				model.LabelSet{model.AddressLabel: "127.0.0.4:9090"},
    +				model.LabelSet{model.AddressLabel: "127.0.0.5:9090"},
    +				model.LabelSet{model.AddressLabel: "127.0.0.6:9090"},
    +				model.LabelSet{model.AddressLabel: "127.0.0.7:9090"},
    +				model.LabelSet{model.AddressLabel: "127.0.0.8:9090"},
    +			},
    +		},
    +	}
    +
    +	active, dropped := sp.Sync(tgts)
    +	expectedActive, expectedDropped := len(tgts[0].Targets), 0
    +	if len(active) != expectedActive {
    +		t.Fatalf("Invalid number of active targets: expected %v, got %v", expectedActive, len(active))
    +	}
    +	if len(dropped) != expectedDropped {
    +		t.Fatalf("Invalid number of dropped targets: expected %v, got %v", expectedDropped, len(dropped))
    +	}
    +
    +	for i := 0; i < 20; i++ {
    +		time.Sleep(time.Duration(10 * time.Millisecond))
    +		sp.reload(newConfig())
    +	}
    +	sp.stop()
    +}
    +
// TestScrapeLoopStopBeforeRun verifies that stop() on a loop that has not
// started yet blocks until run() is called, and that run() then exits
// immediately without ever invoking the scraper.
func TestScrapeLoopStopBeforeRun(t *testing.T) {
	scraper := &testScraper{}

	sl := newScrapeLoop(context.Background(),
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		nil,
	)

	// The scrape pool synchronizes on stopping scrape loops. However, new scrape
	// loops are started asynchronously. Thus it's possible, that a loop is stopped
	// again before having started properly.
	// Stopping not-yet-started loops must block until the run method was called and exited.
	// The run method must exit immediately.

	stopDone := make(chan struct{})
	go func() {
		sl.stop()
		close(stopDone)
	}()

	select {
	case <-stopDone:
		t.Fatalf("Stopping terminated before run exited successfully")
	case <-time.After(500 * time.Millisecond):
	}

	// Running the scrape loop must exit before calling the scraper even once.
	scraper.scrapeFunc = func(context.Context, io.Writer) error {
		t.Fatalf("scraper was called for terminated scrape loop")
		return nil
	}

	runDone := make(chan struct{})
	go func() {
		sl.run(1, 0, nil)
		close(runDone)
	}()

	select {
	case <-runDone:
	case <-time.After(1 * time.Second):
		t.Fatalf("Running terminated scrape loop did not exit")
	}

	select {
	case <-stopDone:
	case <-time.After(1 * time.Second):
		t.Fatalf("Stopping did not terminate after running exited")
	}
}
    +
// nopMutator is a labelsMutator for tests that returns the label set unchanged.
func nopMutator(l labels.Labels) labels.Labels { return l }
    +
    +func TestScrapeLoopStop(t *testing.T) {
    +	var (
    +		signal   = make(chan struct{})
    +		appender = &collectResultAppender{}
    +		scraper  = &testScraper{}
    +		app      = func() storage.Appender { return appender }
    +	)
    +	defer close(signal)
    +
    +	sl := newScrapeLoop(context.Background(),
    +		scraper,
    +		nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		app,
    +	)
    +
    +	// Terminate loop after 2 scrapes.
    +	numScrapes := 0
    +
    +	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
    +		numScrapes++
    +		if numScrapes == 2 {
    +			go sl.stop()
    +		}
    +		w.Write([]byte("metric_a 42\n"))
    +		return nil
    +	}
    +
    +	go func() {
    +		sl.run(10*time.Millisecond, time.Hour, nil)
    +		signal <- struct{}{}
    +	}()
    +
    +	select {
    +	case <-signal:
    +	case <-time.After(5 * time.Second):
    +		t.Fatalf("Scrape wasn't stopped.")
    +	}
    +
    +	// We expected 1 actual sample for each scrape plus 4 for report samples.
    +	// At least 2 scrapes were made, plus the final stale markers.
    +	if len(appender.result) < 5*3 || len(appender.result)%5 != 0 {
    +		t.Fatalf("Expected at least 3 scrapes with 4 samples each, got %d samples", len(appender.result))
    +	}
    +	// All samples in a scrape must have the same timestamp.
    +	var ts int64
    +	for i, s := range appender.result {
    +		if i%5 == 0 {
    +			ts = s.t
    +		} else if s.t != ts {
    +			t.Fatalf("Unexpected multiple timestamps within single scrape")
    +		}
    +	}
    +	// All samples from the last scrape must be stale markers.
    +	for _, s := range appender.result[len(appender.result)-5:] {
    +		if !value.IsStaleNaN(s.v) {
    +			t.Fatalf("Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.v))
    +		}
    +	}
    +}
    +
// TestScrapeLoopRun covers three behaviors of scrapeLoop.run: cancellation
// during the initial offset wait terminates the loop; the per-scrape timeout
// is propagated to the scraper's context; and cancelling the parent context
// terminates a running loop.
func TestScrapeLoopRun(t *testing.T) {
	var (
		signal = make(chan struct{})
		errc   = make(chan error)

		scraper = &testScraper{}
		app     = func() storage.Appender { return &nopAppender{} }
	)
	defer close(signal)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
	)

	// The loop must terminate during the initial offset if the context
	// is canceled.
	scraper.offsetDur = time.Hour

	go func() {
		sl.run(time.Second, time.Hour, errc)
		signal <- struct{}{}
	}()

	// Wait to make sure we are actually waiting on the offset.
	time.Sleep(1 * time.Second)

	cancel()
	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Cancelation during initial offset failed")
	case err := <-errc:
		t.Fatalf("Unexpected error: %s", err)
	}

	// The provided timeout must cause cancelation of the context passed down to the
	// scraper. The scraper has to respect the context.
	scraper.offsetDur = 0

	block := make(chan struct{})
	scraper.scrapeFunc = func(ctx context.Context, _ io.Writer) error {
		select {
		case <-block:
		case <-ctx.Done():
			return ctx.Err()
		}
		return nil
	}

	ctx, cancel = context.WithCancel(context.Background())
	sl = newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
	)

	go func() {
		sl.run(time.Second, 100*time.Millisecond, errc)
		signal <- struct{}{}
	}()

	select {
	case err := <-errc:
		if err != context.DeadlineExceeded {
			t.Fatalf("Expected timeout error but got: %s", err)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("Expected timeout error but got none")
	}

	// We already caught the timeout error and are certainly in the loop.
	// Let the scrapes return immediately to cause no further timeout errors
	// and check whether canceling the parent context terminates the loop.
	close(block)
	cancel()

	select {
	case <-signal:
		// Loop terminated as expected.
	case err := <-errc:
		t.Fatalf("Unexpected error: %s", err)
	case <-time.After(3 * time.Second):
		t.Fatalf("Loop did not terminate on context cancelation")
	}
}
    +
// TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape checks that a series
// scraped once and then missing (because subsequent scrapes fail) receives a
// stale marker, while report samples continue to be appended for every
// scrape attempt.
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
	appender := &collectResultAppender{}
	var (
		signal  = make(chan struct{})
		scraper = &testScraper{}
		app     = func() storage.Appender { return appender }
	)
	defer close(signal)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
	)
	// Succeed once, several failures, then stop.
	numScrapes := 0

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		numScrapes++

		if numScrapes == 1 {
			w.Write([]byte("metric_a 42\n"))
			return nil
		} else if numScrapes == 5 {
			cancel()
		}
		return fmt.Errorf("scrape failed")
	}

	go func() {
		sl.run(10*time.Millisecond, time.Hour, nil)
		signal <- struct{}{}
	}()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape wasn't stopped.")
	}

	// 1 successfully scraped sample, 1 stale marker after first fail, 4 report samples for
	// each scrape successful or not.
	if len(appender.result) != 22 {
		t.Fatalf("Appended samples not as expected. Wanted: %d samples Got: %d", 22, len(appender.result))
	}
	if appender.result[0].v != 42.0 {
		t.Fatalf("Appended first sample not as expected. Wanted: %f Got: %f", appender.result[0].v, 42.0)
	}
	if !value.IsStaleNaN(appender.result[5].v) {
		t.Fatalf("Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[5].v))
	}
}
    +
    +func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
    +	appender := &collectResultAppender{}
    +	var (
    +		signal     = make(chan struct{})
    +		scraper    = &testScraper{}
    +		app        = func() storage.Appender { return appender }
    +		numScrapes = 0
    +	)
    +	defer close(signal)
    +
    +	ctx, cancel := context.WithCancel(context.Background())
    +	sl := newScrapeLoop(ctx,
    +		scraper,
    +		nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		app,
    +	)
    +
    +	// Succeed once, fail to parse once, then stop.
    +	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
    +		numScrapes++
    +
    +		if numScrapes == 1 {
    +			w.Write([]byte("metric_a 42\n"))
    +			return nil
    +		} else if numScrapes == 2 {
    +			w.Write([]byte("7&-\n"))
    +			return nil
    +		} else if numScrapes == 3 {
    +			cancel()
    +		}
    +		return fmt.Errorf("scrape failed")
    +	}
    +
    +	go func() {
    +		sl.run(10*time.Millisecond, time.Hour, nil)
    +		signal <- struct{}{}
    +	}()
    +
    +	select {
    +	case <-signal:
    +	case <-time.After(5 * time.Second):
    +		t.Fatalf("Scrape wasn't stopped.")
    +	}
    +
    +	// 1 successfully scraped sample, 1 stale marker after the parse failure, and
    +	// 4 report samples for each of the 3 scrapes: 1 + 1 + 3*4 = 14.
    +	if len(appender.result) != 14 {
    +		t.Fatalf("Appended samples not as expected. Wanted: %d samples Got: %d", 14, len(appender.result))
    +	}
    +	// Wanted/Got must be passed in that order to match the format string.
    +	if appender.result[0].v != 42.0 {
    +		t.Fatalf("Appended first sample not as expected. Wanted: %f Got: %f", 42.0, appender.result[0].v)
    +	}
    +	if !value.IsStaleNaN(appender.result[5].v) {
    +		t.Fatalf("Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[5].v))
    +	}
    +}
    +
    +func TestScrapeLoopAppend(t *testing.T) {
    +
    +	tests := []struct {
    +		title           string
    +		honorLabels     bool
    +		scrapeLabels    string
    +		discoveryLabels []string
    +		expLset         labels.Labels
    +		expValue        float64
    +	}{
    +		{
    +			// When "honor_labels" is not set,
    +			// a label name collision is handled by adding a prefix.
    +			title:           "Label name collision",
    +			honorLabels:     false,
    +			scrapeLabels:    `metric{n="1"} 0`,
    +			discoveryLabels: []string{"n", "2"},
    +			expLset:         labels.FromStrings("__name__", "metric", "exported_n", "1", "n", "2"),
    +			expValue:        0,
    +		}, {
    +			// Labels with no value need to be removed as these should not be ingested.
    +			title:           "Delete Empty labels",
    +			honorLabels:     false,
    +			scrapeLabels:    `metric{n=""} 0`,
    +			discoveryLabels: nil,
    +			expLset:         labels.FromStrings("__name__", "metric"),
    +			expValue:        0,
    +		}, {
    +			// Honor Labels should ignore labels with the same name.
    +			title:           "Honor Labels",
    +			honorLabels:     true,
    +			scrapeLabels:    `metric{n1="1" n2="2"} 0`,
    +			discoveryLabels: []string{"n1", "0"},
    +			expLset:         labels.FromStrings("__name__", "metric", "n1", "1", "n2", "2"),
    +			expValue:        0,
    +		}, {
    +			title:           "Stale - NaN",
    +			honorLabels:     false,
    +			scrapeLabels:    `metric NaN`,
    +			discoveryLabels: nil,
    +			expLset:         labels.FromStrings("__name__", "metric"),
    +			expValue:        float64(value.NormalNaN),
    +		},
    +	}
    +
    +	for _, test := range tests {
    +		app := &collectResultAppender{}
    +
    +		discoveryLabels := &Target{
    +			labels: labels.FromStrings(test.discoveryLabels...),
    +		}
    +
    +		sl := newScrapeLoop(context.Background(),
    +			nil, nil, nil,
    +			func(l labels.Labels) labels.Labels {
    +				return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
    +			},
    +			func(l labels.Labels) labels.Labels {
    +				return mutateReportSampleLabels(l, discoveryLabels)
    +			},
    +			func() storage.Appender { return app },
    +		)
    +
    +		now := time.Now()
    +
    +		_, _, err := sl.append([]byte(test.scrapeLabels), now)
    +		if err != nil {
    +			t.Fatalf("Unexpected append error: %s", err)
    +		}
    +
    +		expected := []sample{
    +			{
    +				metric: test.expLset,
    +				t:      timestamp.FromTime(now),
    +				v:      test.expValue,
    +			},
    +		}
    +
    +		// When the expected value is NaN
    +		// DeepEqual will report NaNs as being different,
    +		// so replace it with the expected one.
    +		if test.expValue == float64(value.NormalNaN) {
    +			app.result[0].v = expected[0].v
    +		}
    +
    +		t.Logf("Test:%s", test.title)
    +		testutil.Equals(t, expected, app.result)
    +	}
    +}
    +
    +func TestScrapeLoopAppendSampleLimit(t *testing.T) {
    +	resApp := &collectResultAppender{}
    +	app := &limitAppender{Appender: resApp, limit: 1}
    +
    +	sl := newScrapeLoop(context.Background(),
    +		nil, nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		func() storage.Appender { return app },
    +	)
    +
    +	// Get the value of the Counter before performing the append.
    +	beforeMetric := dto.Metric{}
    +	err := targetScrapeSampleLimit.Write(&beforeMetric)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	beforeMetricValue := beforeMetric.GetCounter().GetValue()
    +
    +	now := time.Now()
    +	_, _, err = sl.append([]byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), now)
    +	if err != errSampleLimit {
    +		t.Fatalf("Did not see expected sample limit error: %s", err)
    +	}
    +
    +	// Check that the Counter has been incremented a single time for the scrape,
    +	// not multiple times for each sample.
    +	metric := dto.Metric{}
    +	err = targetScrapeSampleLimit.Write(&metric)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	// Named metricValue so it does not shadow the imported pkg/value package.
    +	metricValue := metric.GetCounter().GetValue()
    +	if (metricValue - beforeMetricValue) != 1 {
    +		t.Fatalf("Unexpected change of sample limit metric: %f", (metricValue - beforeMetricValue))
    +	}
    +
    +	// And verify that we got the samples that fit under the limit.
    +	want := []sample{
    +		{
    +			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
    +			t:      timestamp.FromTime(now),
    +			v:      1,
    +		},
    +	}
    +	if !reflect.DeepEqual(want, resApp.result) {
    +		t.Fatalf("Appended samples not as expected. Wanted: %+v Got: %+v", want, resApp.result)
    +	}
    +}
    +
    +func TestScrapeLoop_ChangingMetricString(t *testing.T) {
    +	// This is a regression test for the scrape loop cache not properly maintaining
    +	// IDs when the string representation of a metric changes across a scrape. Thus
    +	// we use a real storage appender here.
    +	s := testutil.NewStorage(t)
    +	defer s.Close()
    +
    +	app, err := s.Appender()
    +	if err != nil {
    +		t.Error(err)
    +	}
    +	capp := &collectResultAppender{next: app}
    +
    +	sl := newScrapeLoop(context.Background(),
    +		nil, nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		func() storage.Appender { return capp },
    +	)
    +
    +	now := time.Now()
    +	_, _, err = sl.append([]byte(`metric_a{a="1",b="1"} 1`), now)
    +	if err != nil {
    +		t.Fatalf("Unexpected append error: %s", err)
    +	}
    +	_, _, err = sl.append([]byte(`metric_a{b="1",a="1"} 2`), now.Add(time.Minute))
    +	if err != nil {
    +		t.Fatalf("Unexpected append error: %s", err)
    +	}
    +
    +	// DeepEqual will report NaNs as being different, so replace with a different value.
    +	want := []sample{
    +		{
    +			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
    +			t:      timestamp.FromTime(now),
    +			v:      1,
    +		},
    +		{
    +			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
    +			t:      timestamp.FromTime(now.Add(time.Minute)),
    +			v:      2,
    +		},
    +	}
    +	if !reflect.DeepEqual(want, capp.result) {
    +		t.Fatalf("Appended samples not as expected. Wanted: %+v Got: %+v", want, capp.result)
    +	}
    +}
    +
    +func TestScrapeLoopAppendStaleness(t *testing.T) {
    +	app := &collectResultAppender{}
    +
    +	sl := newScrapeLoop(context.Background(),
    +		nil, nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		func() storage.Appender { return app },
    +	)
    +
    +	now := time.Now()
    +	_, _, err := sl.append([]byte("metric_a 1\n"), now)
    +	if err != nil {
    +		t.Fatalf("Unexpected append error: %s", err)
    +	}
    +	_, _, err = sl.append([]byte(""), now.Add(time.Second))
    +	if err != nil {
    +		t.Fatalf("Unexpected append error: %s", err)
    +	}
    +
    +	ingestedNaN := math.Float64bits(app.result[1].v)
    +	if ingestedNaN != value.StaleNaN {
    +		t.Fatalf("Appended stale sample wasn't as expected. Wanted: %x Got: %x", value.StaleNaN, ingestedNaN)
    +	}
    +
    +	// DeepEqual will report NaNs as being different, so replace with a different value.
    +	app.result[1].v = 42
    +	want := []sample{
    +		{
    +			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
    +			t:      timestamp.FromTime(now),
    +			v:      1,
    +		},
    +		{
    +			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
    +			t:      timestamp.FromTime(now.Add(time.Second)),
    +			v:      42,
    +		},
    +	}
    +	if !reflect.DeepEqual(want, app.result) {
    +		t.Fatalf("Appended samples not as expected. Wanted: %+v Got: %+v", want, app.result)
    +	}
    +
    +}
    +
    +func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
    +	app := &collectResultAppender{}
    +	sl := newScrapeLoop(context.Background(),
    +		nil, nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		func() storage.Appender { return app },
    +	)
    +
    +	now := time.Now()
    +	_, _, err := sl.append([]byte("metric_a 1 1000\n"), now)
    +	if err != nil {
    +		t.Fatalf("Unexpected append error: %s", err)
    +	}
    +	_, _, err = sl.append([]byte(""), now.Add(time.Second))
    +	if err != nil {
    +		t.Fatalf("Unexpected append error: %s", err)
    +	}
    +
    +	want := []sample{
    +		{
    +			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
    +			t:      1000,
    +			v:      1,
    +		},
    +	}
    +	if !reflect.DeepEqual(want, app.result) {
    +		t.Fatalf("Appended samples not as expected. Wanted: %+v Got: %+v", want, app.result)
    +	}
    +}
    +
    +func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
    +	var (
    +		scraper  = &testScraper{}
    +		appender = &collectResultAppender{}
    +		app      = func() storage.Appender { return appender }
    +	)
    +
    +	ctx, cancel := context.WithCancel(context.Background())
    +	sl := newScrapeLoop(ctx,
    +		scraper,
    +		nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		app,
    +	)
    +
    +	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
    +		cancel()
    +		return fmt.Errorf("scrape failed")
    +	}
    +
    +	sl.run(10*time.Millisecond, time.Hour, nil)
    +
    +	if appender.result[0].v != 0 {
    +		t.Fatalf("bad 'up' value; want 0, got %v", appender.result[0].v)
    +	}
    +}
    +
    +func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
    +	var (
    +		scraper  = &testScraper{}
    +		appender = &collectResultAppender{}
    +		app      = func() storage.Appender { return appender }
    +	)
    +
    +	ctx, cancel := context.WithCancel(context.Background())
    +	sl := newScrapeLoop(ctx,
    +		scraper,
    +		nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		app,
    +	)
    +
    +	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
    +		cancel()
    +		w.Write([]byte("a{l=\"\xff\"} 1\n"))
    +		return nil
    +	}
    +
    +	sl.run(10*time.Millisecond, time.Hour, nil)
    +
    +	if appender.result[0].v != 0 {
    +		t.Fatalf("bad 'up' value; want 0, got %v", appender.result[0].v)
    +	}
    +}
    +
    +type errorAppender struct {
    +	collectResultAppender
    +}
    +
    +func (app *errorAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
    +	switch lset.Get(model.MetricNameLabel) {
    +	case "out_of_order":
    +		return 0, storage.ErrOutOfOrderSample
    +	case "amend":
    +		return 0, storage.ErrDuplicateSampleForTimestamp
    +	case "out_of_bounds":
    +		return 0, storage.ErrOutOfBounds
    +	default:
    +		return app.collectResultAppender.Add(lset, t, v)
    +	}
    +}
    +
    +func (app *errorAppender) AddFast(lset labels.Labels, ref uint64, t int64, v float64) error {
    +	return app.collectResultAppender.AddFast(lset, ref, t, v)
    +}
    +
    +func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) {
    +	app := &errorAppender{}
    +
    +	sl := newScrapeLoop(context.Background(),
    +		nil,
    +		nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		func() storage.Appender { return app },
    +	)
    +
    +	now := time.Unix(1, 0)
    +	_, _, err := sl.append([]byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), now)
    +	if err != nil {
    +		t.Fatalf("Unexpected append error: %s", err)
    +	}
    +	want := []sample{
    +		{
    +			metric: labels.FromStrings(model.MetricNameLabel, "normal"),
    +			t:      timestamp.FromTime(now),
    +			v:      1,
    +		},
    +	}
    +	if !reflect.DeepEqual(want, app.result) {
    +		t.Fatalf("Appended samples not as expected. Wanted: %+v Got: %+v", want, app.result)
    +	}
    +}
    +
    +func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
    +	app := &collectResultAppender{}
    +	sl := newScrapeLoop(context.Background(),
    +		nil,
    +		nil, nil,
    +		nopMutator,
    +		nopMutator,
    +		func() storage.Appender {
    +			return &timeLimitAppender{
    +				Appender: app,
    +				maxTime:  timestamp.FromTime(time.Now().Add(10 * time.Minute)),
    +			}
    +		},
    +	)
    +
    +	now := time.Now().Add(20 * time.Minute)
    +	total, added, err := sl.append([]byte("normal 1\n"), now)
    +	if total != 1 {
    +		t.Error("expected 1 metric")
    +		return
    +	}
    +
    +	if added != 0 {
    +		t.Error("no metric should be added")
    +	}
    +
    +	if err != nil {
    +		t.Errorf("expect no error, got %s", err.Error())
    +	}
    +}
    +
    +func TestTargetScraperScrapeOK(t *testing.T) {
    +	const (
    +		configTimeout   = 1500 * time.Millisecond
    +		expectedTimeout = "1.500000"
    +	)
    +
    +	server := httptest.NewServer(
    +		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    +			accept := r.Header.Get("Accept")
    +			if !strings.HasPrefix(accept, "text/plain;") {
    +				t.Errorf("Expected Accept header to prefer text/plain, got %q", accept)
    +			}
    +
    +			timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
    +			if timeout != expectedTimeout {
    +				t.Errorf("Expected scrape timeout header %q, got %q", expectedTimeout, timeout)
    +			}
    +
    +			w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
    +			w.Write([]byte("metric_a 1\nmetric_b 2\n"))
    +		}),
    +	)
    +	defer server.Close()
    +
    +	serverURL, err := url.Parse(server.URL)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	ts := &targetScraper{
    +		Target: &Target{
    +			labels: labels.FromStrings(
    +				model.SchemeLabel, serverURL.Scheme,
    +				model.AddressLabel, serverURL.Host,
    +			),
    +		},
    +		client:  http.DefaultClient,
    +		timeout: configTimeout,
    +	}
    +	var buf bytes.Buffer
    +
    +	if err := ts.scrape(context.Background(), &buf); err != nil {
    +		t.Fatalf("Unexpected scrape error: %s", err)
    +	}
    +	require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
    +}
    +
    +func TestTargetScrapeScrapeCancel(t *testing.T) {
    +	block := make(chan struct{})
    +
    +	server := httptest.NewServer(
    +		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    +			<-block
    +		}),
    +	)
    +	defer server.Close()
    +
    +	serverURL, err := url.Parse(server.URL)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	ts := &targetScraper{
    +		Target: &Target{
    +			labels: labels.FromStrings(
    +				model.SchemeLabel, serverURL.Scheme,
    +				model.AddressLabel, serverURL.Host,
    +			),
    +		},
    +		client: http.DefaultClient,
    +	}
    +	ctx, cancel := context.WithCancel(context.Background())
    +
    +	errc := make(chan error)
    +
    +	go func() {
    +		time.Sleep(1 * time.Second)
    +		cancel()
    +	}()
    +
    +	go func() {
    +		if err := ts.scrape(ctx, ioutil.Discard); err != context.Canceled {
    +			errc <- fmt.Errorf("Expected context cancelation error but got: %s", err)
    +		}
    +		close(errc)
    +	}()
    +
    +	select {
    +	case <-time.After(5 * time.Second):
    +		t.Fatalf("Scrape function did not return in time after cancelation")
    +	case err := <-errc:
    +		if err != nil {
    +			t.Fatal(err) // avoid non-constant Fatalf format string (go vet printf)
    +		}
    +	}
    +	// If this is closed in a defer above the function the test server
    +	// doesn't terminate and the test doesn't complete.
    +	close(block)
    +}
    +
    +func TestTargetScrapeScrapeNotFound(t *testing.T) {
    +	server := httptest.NewServer(
    +		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    +			w.WriteHeader(http.StatusNotFound)
    +		}),
    +	)
    +	defer server.Close()
    +
    +	serverURL, err := url.Parse(server.URL)
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	ts := &targetScraper{
    +		Target: &Target{
    +			labels: labels.FromStrings(
    +				model.SchemeLabel, serverURL.Scheme,
    +				model.AddressLabel, serverURL.Host,
    +			),
    +		},
    +		client: http.DefaultClient,
    +	}
    +
    +	if err := ts.scrape(context.Background(), ioutil.Discard); !strings.Contains(err.Error(), "404") {
    +		t.Fatalf("Expected \"404 NotFound\" error but got: %s", err)
    +	}
    +}
    +
    +// testScraper implements the scraper interface and allows setting values
    +// returned by its methods. It also allows setting a custom scrape function.
    +type testScraper struct {
    +	offsetDur time.Duration
    +
    +	lastStart    time.Time
    +	lastDuration time.Duration
    +	lastError    error
    +
    +	scrapeErr  error
    +	scrapeFunc func(context.Context, io.Writer) error
    +}
    +
    +func (ts *testScraper) offset(interval time.Duration) time.Duration {
    +	return ts.offsetDur
    +}
    +
    +func (ts *testScraper) report(start time.Time, duration time.Duration, err error) {
    +	ts.lastStart = start
    +	ts.lastDuration = duration
    +	ts.lastError = err
    +}
    +
    +func (ts *testScraper) scrape(ctx context.Context, w io.Writer) error {
    +	if ts.scrapeFunc != nil {
    +		return ts.scrapeFunc(ctx, w)
    +	}
    +	return ts.scrapeErr
    +}
    diff --git a/src/prometheus/scrape/target.go b/src/prometheus/scrape/target.go
    new file mode 100644
    index 0000000..5646a0b
    --- /dev/null
    +++ b/src/prometheus/scrape/target.go
    @@ -0,0 +1,380 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package scrape
    +
    +import (
    +	"errors"
    +	"fmt"
    +	"hash/fnv"
    +	"net"
    +	"net/url"
    +	"strings"
    +	"sync"
    +	"time"
    +
    +	"github.com/prometheus/common/model"
    +
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/discovery/targetgroup"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/pkg/relabel"
    +	"github.com/prometheus/prometheus/pkg/value"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
    +// TargetHealth describes the health state of a target.
    +type TargetHealth string
    +
    +// The possible health states of a target based on the last performed scrape.
    +const (
    +	HealthUnknown TargetHealth = "unknown"
    +	HealthGood    TargetHealth = "up"
    +	HealthBad     TargetHealth = "down"
    +)
    +
    +// Target refers to a singular HTTP or HTTPS endpoint.
    +type Target struct {
    +	// Labels before any processing.
    +	discoveredLabels labels.Labels
    +	// Any labels that are added to this target and its metrics.
    +	labels labels.Labels
    +	// Additional URL parameters that are part of the target URL.
    +	params url.Values
    +
    +	// mtx guards the mutable scrape-status fields below.
    +	mtx        sync.RWMutex
    +	lastError  error
    +	lastScrape time.Time
    +	health     TargetHealth
    +}
    +
    +// NewTarget creates a reasonably configured target for querying.
    +func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target {
    +	return &Target{
    +		labels:           labels,
    +		discoveredLabels: discoveredLabels,
    +		params:           params,
    +		health:           HealthUnknown,
    +	}
    +}
    +
    +func (t *Target) String() string {
    +	return t.URL().String()
    +}
    +
    +// hash returns an identifying hash for the target.
    +func (t *Target) hash() uint64 {
    +	h := fnv.New64a()
    +	h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
    +	h.Write([]byte(t.URL().String()))
    +
    +	return h.Sum64()
    +}
    +
    +// offset returns the time until the next scrape cycle for the target.
    +func (t *Target) offset(interval time.Duration) time.Duration {
    +	now := time.Now().UnixNano()
    +
    +	var (
    +		base   = now % int64(interval)
    +		offset = t.hash() % uint64(interval)
    +		next   = base + int64(offset)
    +	)
    +
    +	if next > int64(interval) {
    +		next -= int64(interval)
    +	}
    +	return time.Duration(next)
    +}
    +
    +// Labels returns a copy of the set of all public labels of the target.
    +func (t *Target) Labels() labels.Labels {
    +	lset := make(labels.Labels, 0, len(t.labels))
    +	for _, l := range t.labels {
    +		if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
    +			lset = append(lset, l)
    +		}
    +	}
    +	return lset
    +}
    +
    +// DiscoveredLabels returns a copy of the target's labels before any processing.
    +func (t *Target) DiscoveredLabels() labels.Labels {
    +	t.mtx.Lock()
    +	defer t.mtx.Unlock()
    +	lset := make(labels.Labels, len(t.discoveredLabels))
    +	copy(lset, t.discoveredLabels)
    +	return lset
    +}
    +
    +// SetDiscoveredLabels sets new DiscoveredLabels
    +func (t *Target) SetDiscoveredLabels(l labels.Labels) {
    +	t.mtx.Lock()
    +	defer t.mtx.Unlock()
    +	t.discoveredLabels = l
    +}
    +
    +// URL returns a copy of the target's URL.
    +func (t *Target) URL() *url.URL {
    +	params := url.Values{}
    +
    +	for k, v := range t.params {
    +		params[k] = make([]string, len(v))
    +		copy(params[k], v)
    +	}
    +	for _, l := range t.labels {
    +		if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) {
    +			continue
    +		}
    +		ks := l.Name[len(model.ParamLabelPrefix):]
    +
    +		if len(params[ks]) > 0 {
    +			params[ks][0] = l.Value
    +		} else {
    +			params[ks] = []string{l.Value}
    +		}
    +	}
    +
    +	return &url.URL{
    +		Scheme:   t.labels.Get(model.SchemeLabel),
    +		Host:     t.labels.Get(model.AddressLabel),
    +		Path:     t.labels.Get(model.MetricsPathLabel),
    +		RawQuery: params.Encode(),
    +	}
    +}
    +
    +func (t *Target) report(start time.Time, dur time.Duration, err error) {
    +	t.mtx.Lock()
    +	defer t.mtx.Unlock()
    +
    +	if err == nil {
    +		t.health = HealthGood
    +	} else {
    +		t.health = HealthBad
    +	}
    +
    +	t.lastError = err
    +	t.lastScrape = start
    +}
    +
    +// LastError returns the error encountered during the last scrape.
    +func (t *Target) LastError() error {
    +	t.mtx.RLock()
    +	defer t.mtx.RUnlock()
    +
    +	return t.lastError
    +}
    +
    +// LastScrape returns the time of the last scrape.
    +func (t *Target) LastScrape() time.Time {
    +	t.mtx.RLock()
    +	defer t.mtx.RUnlock()
    +
    +	return t.lastScrape
    +}
    +
    +// Health returns the last known health state of the target.
    +func (t *Target) Health() TargetHealth {
    +	t.mtx.RLock()
    +	defer t.mtx.RUnlock()
    +
    +	return t.health
    +}
    +
    +// Targets is a sortable list of targets.
    +type Targets []*Target
    +
    +func (ts Targets) Len() int           { return len(ts) }
    +func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() }
    +func (ts Targets) Swap(i, j int)      { ts[i], ts[j] = ts[j], ts[i] }
    +
    +var errSampleLimit = errors.New("sample limit exceeded")
    +
    +// limitAppender limits the number of total appended samples in a batch.
    +type limitAppender struct {
    +	storage.Appender
    +
    +	limit int
    +	i     int
    +}
    +
    +func (app *limitAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
    +	if !value.IsStaleNaN(v) {
    +		app.i++
    +		if app.i > app.limit {
    +			return 0, errSampleLimit
    +		}
    +	}
    +	ref, err := app.Appender.Add(lset, t, v)
    +	if err != nil {
    +		return 0, err
    +	}
    +	return ref, nil
    +}
    +
    +func (app *limitAppender) AddFast(lset labels.Labels, ref uint64, t int64, v float64) error {
    +	if !value.IsStaleNaN(v) {
    +		app.i++
    +		if app.i > app.limit {
    +			return errSampleLimit
    +		}
    +	}
    +	err := app.Appender.AddFast(lset, ref, t, v)
    +	return err
    +}
    +
    +type timeLimitAppender struct {
    +	storage.Appender
    +
    +	maxTime int64
    +}
    +
    +func (app *timeLimitAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
    +	if t > app.maxTime {
    +		return 0, storage.ErrOutOfBounds
    +	}
    +
    +	ref, err := app.Appender.Add(lset, t, v)
    +	if err != nil {
    +		return 0, err
    +	}
    +	return ref, nil
    +}
    +
    +func (app *timeLimitAppender) AddFast(lset labels.Labels, ref uint64, t int64, v float64) error {
    +	if t > app.maxTime {
    +		return storage.ErrOutOfBounds
    +	}
    +	err := app.Appender.AddFast(lset, ref, t, v)
    +	return err
    +}
    +
    +// populateLabels builds a label set from the given label set and scrape configuration.
    +// It returns a label set before relabeling was applied as the second return value.
    +// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
    +func populateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) {
    +	// Copy labels into the labelset for the target if they are not set already.
    +	scrapeLabels := []labels.Label{
    +		{Name: model.JobLabel, Value: cfg.JobName},
    +		{Name: model.MetricsPathLabel, Value: cfg.MetricsPath},
    +		{Name: model.SchemeLabel, Value: cfg.Scheme},
    +	}
    +	lb := labels.NewBuilder(lset)
    +
    +	for _, l := range scrapeLabels {
    +		if lv := lset.Get(l.Name); lv == "" {
    +			lb.Set(l.Name, l.Value)
    +		}
    +	}
    +	// Encode scrape query parameters as labels.
    +	for k, v := range cfg.Params {
    +		if len(v) > 0 {
    +			lb.Set(model.ParamLabelPrefix+k, v[0])
    +		}
    +	}
    +
    +	preRelabelLabels := lb.Labels()
    +	lset = relabel.Process(preRelabelLabels, cfg.RelabelConfigs...)
    +
    +	// Check if the target was dropped.
    +	if lset == nil {
    +		return nil, preRelabelLabels, nil
    +	}
    +	if v := lset.Get(model.AddressLabel); v == "" {
    +		return nil, nil, fmt.Errorf("no address")
    +	}
    +
    +	lb = labels.NewBuilder(lset)
    +
    +	// addPort checks whether we should add a default port to the address.
    +	// If the address is not valid, we don't append a port either.
    +	addPort := func(s string) bool {
    +		// If we can split, a port exists and we don't have to add one.
    +		if _, _, err := net.SplitHostPort(s); err == nil {
    +			return false
    +		}
    +		// If adding a port makes it valid, the previous error
    +		// was not due to an invalid address and we can append a port.
    +		_, _, err := net.SplitHostPort(s + ":1234")
    +		return err == nil
    +	}
    +	addr := lset.Get(model.AddressLabel)
    +	// If it's an address with no trailing port, infer it based on the used scheme.
    +	if addPort(addr) {
    +		// Addresses reaching this point are already wrapped in [] if necessary.
    +		switch lset.Get(model.SchemeLabel) {
    +		case "http", "":
    +			addr = addr + ":80"
    +		case "https":
    +			addr = addr + ":443"
    +		default:
    +			return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme)
    +		}
    +		lb.Set(model.AddressLabel, addr)
    +	}
    +
    +	if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
    +		return nil, nil, err
    +	}
    +
    +	// Meta labels are deleted after relabelling. Other internal labels propagate to
    +	// the target which decides whether they will be part of their label set.
    +	for _, l := range lset {
    +		if strings.HasPrefix(l.Name, model.MetaLabelPrefix) {
    +			lb.Del(l.Name)
    +		}
    +	}
    +
    +	// Default the instance label to the target address.
    +	if v := lset.Get(model.InstanceLabel); v == "" {
    +		lb.Set(model.InstanceLabel, addr)
    +	}
    +
    +	res = lb.Labels()
    +	for _, l := range res {
    +		// Check label values are valid, drop the target if not.
    +		if !model.LabelValue(l.Value).IsValid() {
    +			return nil, nil, fmt.Errorf("invalid label value for %q: %q", l.Name, l.Value)
    +		}
    +	}
    +	return res, preRelabelLabels, nil
    +}
    +
    +// targetsFromGroup builds targets based on the given TargetGroup and config.
    +func targetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, error) {
    +	targets := make([]*Target, 0, len(tg.Targets))
    +
    +	for i, tlset := range tg.Targets {
    +		lbls := make([]labels.Label, 0, len(tlset)+len(tg.Labels))
    +
    +		for ln, lv := range tlset {
    +			lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
    +		}
    +		for ln, lv := range tg.Labels {
    +			if _, ok := tlset[ln]; !ok {
    +				lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
    +			}
    +		}
    +
    +		lset := labels.New(lbls...)
    +
    +		lbls, origLabels, err := populateLabels(lset, cfg)
    +		if err != nil {
    +			return nil, fmt.Errorf("instance %d in group %s: %s", i, tg, err)
    +		}
    +		if lbls != nil || origLabels != nil {
    +			targets = append(targets, NewTarget(lbls, origLabels, cfg.Params))
    +		}
    +	}
    +	return targets, nil
    +}
    diff --git a/src/prometheus/scrape/target_test.go b/src/prometheus/scrape/target_test.go
    new file mode 100644
    index 0000000..49272d4
    --- /dev/null
    +++ b/src/prometheus/scrape/target_test.go
    @@ -0,0 +1,372 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package scrape
    +
    +import (
    +	"crypto/tls"
    +	"crypto/x509"
    +	"fmt"
    +	"io/ioutil"
    +	"net/http"
    +	"net/http/httptest"
    +	"net/url"
    +	"reflect"
    +	"strings"
    +	"testing"
    +	"time"
    +
    +	"github.com/prometheus/common/model"
    +
    +	config_util "github.com/prometheus/common/config"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +)
    +
    +const (
    +	caCertPath = "testdata/ca.cer"
    +)
    +
    +func TestTargetLabels(t *testing.T) {
    +	target := newTestTarget("example.com:80", 0, labels.FromStrings("job", "some_job", "foo", "bar"))
    +	want := labels.FromStrings(model.JobLabel, "some_job", "foo", "bar")
    +	got := target.Labels()
    +	if !reflect.DeepEqual(want, got) {
    +		t.Errorf("want base labels %v, got %v", want, got)
    +	}
    +}
    +
    +func TestTargetOffset(t *testing.T) {
    +	interval := 10 * time.Second
    +
    +	offsets := make([]time.Duration, 10000)
    +
    +	// Calculate offsets for 10000 different targets.
    +	for i := range offsets {
    +		target := newTestTarget("example.com:80", 0, labels.FromStrings(
    +			"label", fmt.Sprintf("%d", i),
    +		))
    +		offsets[i] = target.offset(interval)
    +	}
    +
    +	// Put the offsets into buckets and validate that they are all
    +	// within bounds.
    +	bucketSize := 1 * time.Second
    +	buckets := make([]int, interval/bucketSize)
    +
    +	for _, offset := range offsets {
    +		if offset < 0 || offset >= interval {
    +			t.Fatalf("Offset %v out of bounds", offset)
    +		}
    +
    +		bucket := offset / bucketSize
    +		buckets[bucket]++
    +	}
    +
    +	t.Log(buckets)
    +
     +	// Calculate whether the number of targets per bucket
    +	// does not differ more than a given tolerance.
    +	avg := len(offsets) / len(buckets)
    +	tolerance := 0.15
    +
    +	for _, bucket := range buckets {
    +		diff := bucket - avg
    +		if diff < 0 {
    +			diff = -diff
    +		}
    +
    +		if float64(diff)/float64(avg) > tolerance {
    +			t.Fatalf("Bucket out of tolerance bounds")
    +		}
    +	}
    +}
    +
    +func TestTargetURL(t *testing.T) {
    +	params := url.Values{
    +		"abc": []string{"foo", "bar", "baz"},
    +		"xyz": []string{"hoo"},
    +	}
    +	labels := labels.FromMap(map[string]string{
    +		model.AddressLabel:     "example.com:1234",
    +		model.SchemeLabel:      "https",
    +		model.MetricsPathLabel: "/metricz",
    +		"__param_abc":          "overwrite",
    +		"__param_cde":          "huu",
    +	})
    +	target := NewTarget(labels, labels, params)
    +
    +	// The reserved labels are concatenated into a full URL. The first value for each
    +	// URL query parameter can be set/modified via labels as well.
    +	expectedParams := url.Values{
    +		"abc": []string{"overwrite", "bar", "baz"},
    +		"cde": []string{"huu"},
    +		"xyz": []string{"hoo"},
    +	}
    +	expectedURL := url.URL{
    +		Scheme:   "https",
    +		Host:     "example.com:1234",
    +		Path:     "/metricz",
    +		RawQuery: expectedParams.Encode(),
    +	}
    +
    +	if u := target.URL(); !reflect.DeepEqual(u.String(), expectedURL.String()) {
    +		t.Fatalf("Expected URL %q, but got %q", expectedURL.String(), u.String())
    +	}
    +}
    +
    +func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) *Target {
    +	lb := labels.NewBuilder(lbls)
    +	lb.Set(model.SchemeLabel, "http")
    +	lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://"))
    +	lb.Set(model.MetricsPathLabel, "/metrics")
    +
    +	return &Target{labels: lb.Labels()}
    +}
    +
    +func TestNewHTTPBearerToken(t *testing.T) {
    +	server := httptest.NewServer(
    +		http.HandlerFunc(
    +			func(w http.ResponseWriter, r *http.Request) {
    +				expected := "Bearer 1234"
    +				received := r.Header.Get("Authorization")
    +				if expected != received {
    +					t.Fatalf("Authorization header was not set correctly: expected '%v', got '%v'", expected, received)
    +				}
    +			},
    +		),
    +	)
    +	defer server.Close()
    +
    +	cfg := config_util.HTTPClientConfig{
    +		BearerToken: "1234",
    +	}
    +	c, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	_, err = c.Get(server.URL)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +}
    +
    +func TestNewHTTPBearerTokenFile(t *testing.T) {
    +	server := httptest.NewServer(
    +		http.HandlerFunc(
    +			func(w http.ResponseWriter, r *http.Request) {
    +				expected := "Bearer 12345"
    +				received := r.Header.Get("Authorization")
    +				if expected != received {
    +					t.Fatalf("Authorization header was not set correctly: expected '%v', got '%v'", expected, received)
    +				}
    +			},
    +		),
    +	)
    +	defer server.Close()
    +
    +	cfg := config_util.HTTPClientConfig{
    +		BearerTokenFile: "testdata/bearertoken.txt",
    +	}
    +	c, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	_, err = c.Get(server.URL)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +}
    +
    +func TestNewHTTPBasicAuth(t *testing.T) {
    +	server := httptest.NewServer(
    +		http.HandlerFunc(
    +			func(w http.ResponseWriter, r *http.Request) {
    +				username, password, ok := r.BasicAuth()
    +				if !(ok && username == "user" && password == "password123") {
    +					t.Fatalf("Basic authorization header was not set correctly: expected '%v:%v', got '%v:%v'", "user", "password123", username, password)
    +				}
    +			},
    +		),
    +	)
    +	defer server.Close()
    +
    +	cfg := config_util.HTTPClientConfig{
    +		BasicAuth: &config_util.BasicAuth{
    +			Username: "user",
    +			Password: "password123",
    +		},
    +	}
    +	c, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	_, err = c.Get(server.URL)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +}
    +
    +func TestNewHTTPCACert(t *testing.T) {
    +	server := httptest.NewUnstartedServer(
    +		http.HandlerFunc(
    +			func(w http.ResponseWriter, r *http.Request) {
    +				w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
    +				w.Write([]byte{})
    +			},
    +		),
    +	)
    +	server.TLS = newTLSConfig("server", t)
    +	server.StartTLS()
    +	defer server.Close()
    +
    +	cfg := config_util.HTTPClientConfig{
    +		TLSConfig: config_util.TLSConfig{
    +			CAFile: caCertPath,
    +		},
    +	}
    +	c, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	_, err = c.Get(server.URL)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +}
    +
    +func TestNewHTTPClientCert(t *testing.T) {
    +	server := httptest.NewUnstartedServer(
    +		http.HandlerFunc(
    +			func(w http.ResponseWriter, r *http.Request) {
    +				w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
    +				w.Write([]byte{})
    +			},
    +		),
    +	)
    +	tlsConfig := newTLSConfig("server", t)
    +	tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
    +	tlsConfig.ClientCAs = tlsConfig.RootCAs
    +	tlsConfig.BuildNameToCertificate()
    +	server.TLS = tlsConfig
    +	server.StartTLS()
    +	defer server.Close()
    +
    +	cfg := config_util.HTTPClientConfig{
    +		TLSConfig: config_util.TLSConfig{
    +			CAFile:   caCertPath,
    +			CertFile: "testdata/client.cer",
    +			KeyFile:  "testdata/client.key",
    +		},
    +	}
    +	c, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	_, err = c.Get(server.URL)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +}
    +
    +func TestNewHTTPWithServerName(t *testing.T) {
    +	server := httptest.NewUnstartedServer(
    +		http.HandlerFunc(
    +			func(w http.ResponseWriter, r *http.Request) {
    +				w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
    +				w.Write([]byte{})
    +			},
    +		),
    +	)
    +	server.TLS = newTLSConfig("servername", t)
    +	server.StartTLS()
    +	defer server.Close()
    +
    +	cfg := config_util.HTTPClientConfig{
    +		TLSConfig: config_util.TLSConfig{
    +			CAFile:     caCertPath,
    +			ServerName: "prometheus.rocks",
    +		},
    +	}
    +	c, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	_, err = c.Get(server.URL)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +}
    +
    +func TestNewHTTPWithBadServerName(t *testing.T) {
    +	server := httptest.NewUnstartedServer(
    +		http.HandlerFunc(
    +			func(w http.ResponseWriter, r *http.Request) {
    +				w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
    +				w.Write([]byte{})
    +			},
    +		),
    +	)
    +	server.TLS = newTLSConfig("servername", t)
    +	server.StartTLS()
    +	defer server.Close()
    +
    +	cfg := config_util.HTTPClientConfig{
    +		TLSConfig: config_util.TLSConfig{
    +			CAFile:     caCertPath,
    +			ServerName: "badname",
    +		},
    +	}
    +	c, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +	_, err = c.Get(server.URL)
    +	if err == nil {
    +		t.Fatal("Expected error, got nil.")
    +	}
    +}
    +
    +func newTLSConfig(certName string, t *testing.T) *tls.Config {
    +	tlsConfig := &tls.Config{}
    +	caCertPool := x509.NewCertPool()
    +	caCert, err := ioutil.ReadFile(caCertPath)
    +	if err != nil {
    +		t.Fatalf("Couldn't set up TLS server: %v", err)
    +	}
    +	caCertPool.AppendCertsFromPEM(caCert)
    +	tlsConfig.RootCAs = caCertPool
    +	tlsConfig.ServerName = "127.0.0.1"
    +	certPath := fmt.Sprintf("testdata/%s.cer", certName)
    +	keyPath := fmt.Sprintf("testdata/%s.key", certName)
    +	cert, err := tls.LoadX509KeyPair(certPath, keyPath)
    +	if err != nil {
    +		t.Errorf("Unable to use specified server cert (%s) & key (%v): %s", certPath, keyPath, err)
    +	}
    +	tlsConfig.Certificates = []tls.Certificate{cert}
    +	tlsConfig.BuildNameToCertificate()
    +	return tlsConfig
    +}
    +
    +func TestNewClientWithBadTLSConfig(t *testing.T) {
    +	cfg := config_util.HTTPClientConfig{
    +		TLSConfig: config_util.TLSConfig{
    +			CAFile:   "testdata/nonexistent_ca.cer",
    +			CertFile: "testdata/nonexistent_client.cer",
    +			KeyFile:  "testdata/nonexistent_client.key",
    +		},
    +	}
    +	_, err := config_util.NewClientFromConfig(cfg, "test")
    +	if err == nil {
    +		t.Fatalf("Expected error, got nil.")
    +	}
    +}
    diff --git a/src/prometheus/scrape/testdata/bearertoken.txt b/src/prometheus/scrape/testdata/bearertoken.txt
    new file mode 100644
    index 0000000..e56e15b
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/bearertoken.txt
    @@ -0,0 +1 @@
    +12345
    diff --git a/src/prometheus/scrape/testdata/ca.cer b/src/prometheus/scrape/testdata/ca.cer
    new file mode 100644
    index 0000000..86f627a
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/ca.cer
    @@ -0,0 +1,22 @@
    +-----BEGIN CERTIFICATE-----
    +MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
    +BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
    +Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4
    +MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
    +DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ
    +BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
    +ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq
    +t4kjBRWzqkZ5uJVkzBPERKEBoOI9pWcrqtMTBkMzHJY2Ep7GHTab10e9KC2IFQT6
    +FKP/jCYixaIVx3azEfajRJooD8r79FGoagWUfHdHyCFWJb/iLt8z8+S91kelSRMS
    +yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/
    +VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV
    +w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1
    +BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL
    +rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu
    +e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1
    +0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k
    +pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH
    +U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx
    +j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU
    +mM5lH/s=
    +-----END CERTIFICATE-----
    diff --git a/src/prometheus/scrape/testdata/ca.key b/src/prometheus/scrape/testdata/ca.key
    new file mode 100644
    index 0000000..1db2600
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/ca.key
    @@ -0,0 +1,27 @@
    +-----BEGIN RSA PRIVATE KEY-----
    +MIIEpgIBAAKCAQEA6VIFTfJalQQtuHOLPOdHSGcAvt1sQfN8S0Rzo3c+hK8w1yqt
    +RWq3iSMFFbOqRnm4lWTME8REoQGg4j2lZyuq0xMGQzMcljYSnsYdNpvXR70oLYgV
    +BPoUo/+MJiLFohXHdrMR9qNEmigPyvv0UahqBZR8d0fIIVYlv+Iu3zPz5L3WR6VJ
    +ExLIH0zXKlaibMHPVQVdmnWiI07mjv92BdbgyAtR8Lahsn2PnGqlzoaSFYxbhkXA
    +Sv9UuO7PEb0Hy0t9C6GdcoafVbz5bNlVYJYwP+wt9l+aMppaXBxfMvnWDwT4gQgE
    +mpXDnVowjqUORy8AzuAurVq4tkxVfIpZIrDtcwIDAQABAoIBAQCcVDd3pYWpyLX1
    +m31UnkX1rgYi3Gs3uTOznra4dSIvds6LrG2SUFGPEibLBql1NQNHHdVa/StakaPB
    +UrqraOe5K0sL5Ygm4S4Ssf1K5JoW2Be+gipLPmBsDcJSnwO6eUs/LfZAQd6qR2Nl
    +hvGJcQUwne/TYAYox/bdHWh4Zu/odz4NrZKZLbnXkdLLDEhZbjA0HpwJZ7NpMcB7
    +Z6NayOm5dAZncfqBjY+3GNL0VjvDjwwYbESM8GkAbojMgcpODGk0h9arRWCP2RqT
    +SVgmiFI2mVT7sW1XLdVXmyCL2jzak7sktpbLbVgngwOrBmLO/m4NBftzcZrgvxj3
    +YakCPH/hAoGBAP1v85pIxqWr5dFdRlOW0MG35ifL+YXpavcs233jGDHYNZefrR5q
    +Mw8eA20zwj41OdryqGh58nLYm3zYM0vPFrRJrzWYQfcWDmQELAylr9z9vsMj8gRq
    +IZQD6wzFmLi1PN2QDmovF+2y/CLAq03XK6FQlNsVQxubfjh4hcX5+nXDAoGBAOut
    +/pQaIBbIhaI8y3KjpizU24jxIkV8R/q1yE5V01YCl2OC5hEd4iZP14YLDRXLSHKT
    +e/dyJ/OEyTIzUeDg0ZF3ao9ugbWuASgrnrrdPEooi7C9n9PeaLFTK5oVZoVP2A7E
    +BwhSFW3VdEzQkdJczVE2jOY6JdBKMndjoDQnhT6RAoGBAL4WMO1gdnYeZ0JQJoZd
    +kPgrOZpR2DaDa3I3F+3k3enM0+2EmzE70E4fYcyPTLqh62H4LS4ngRx4sK7D7j2G
    +9u2EcsDNEXUE+wgzROK7hxtGysTMeiKrg8Hj6nFq53Bqp1s7SESGS/lCDPD398Rr
    +hdL5gJyN5waW6uXqJ9Pk+eFHAoGBAKV/YGcV1XTKSPT9ZgxRmM6ghq0qT1umA1Gt
    +t0QzBp2+Yhqx/+cDKhynMnxhZEXqoyw6HvJLSny5wSMsYJHeratNxRmFizZOQ2e3
    +AdbMppqY0EdDUWnRI4lqExM3de+let4bj6irI3smSm3qhIvJOTCPcu/04zrZ74hh
    +AE2/dtTRAoGBAO6bENEqLgxZOvX5NnbytTuuoEnbceUPiIvc6S/nWJPEoGXVN2EJ
    +a3OaIOQmknE6bjXIWrHTaXJhwejvPUz9DVa4GxU5aJhs4gpocVGf+owQFvk4nJO8
    +JL+QVVdXp3XdrXIGyvXJfy0fXXgJg5czrnDHjSTE8/2POtyuZ6VyBtQc
    +-----END RSA PRIVATE KEY-----
    diff --git a/src/prometheus/scrape/testdata/client.cer b/src/prometheus/scrape/testdata/client.cer
    new file mode 100644
    index 0000000..eeeeca9
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/client.cer
    @@ -0,0 +1,25 @@
    +-----BEGIN CERTIFICATE-----
    +MIIERjCCAy6gAwIBAgIBZDANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJYWDEV
    +MBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkg
    +THRkMRswGQYDVQQDDBJQcm9tZXRoZXVzIFRlc3QgQ0EwHhcNMTUwODA0MTQ0MTE2
    +WhcNNDIxMjIwMTQ0MTE2WjBVMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVs
    +dCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMREwDwYDVQQDDAh0
    +ZXN0dXNlcjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOKBBXx35X9+
    +BLGqY/cC2+lQYZzn13Z8ZEDrUKpv5n91QA0B/YZE3gDSnk2yry8dxmp1NJtXm8Wr
    +rIQSBnsTGOKwyIwR1gcggUYPD9fCyy7T7y7YbzBG8drEcxiK/YIWyio0fpRCfT9b
    +2+fOEeY+0+tgFV++XjbXVzXRCBMmsZ22cOm4t2t7GHKBZhYoUoPgKjDn+4t/rr0r
    +1od6yVOocYCo6RruQHsWPHj6QlU8VGutkD7PpvLS+w2l/6JqmZDHlY6o6pDidC8a
    +kp8i/t3pNBlexk6st/8YZ5S9j6LjqC6bUnerUZB40b6L8OXXwWS3S5y6t07A1QIn
    +Pv2DZKGbn8Uuj7RvS5OAZdDn1P+M5aVlRLoYbdTHJILrLg+bxyDIokqONbLgj78A
    +FT6a013eJAZJBkeoaN7Djbf/d5FjRDadH2bX0Uur3APh4cbv+0Fo13CPPSckA9EU
    +o42qBmKLWys858D8vRKyS/mq/IeRL0AIwKuaEIJtPtiwCTnk6PvFfQvO80z/Eyq+
    +uvRBoZbrWHb+3GR8rNzu8Gc1UbTC+jnGYtbQhxx1/7nae52XGRpplnwPO9cb+px2
    +Zf802h+lP3SMY/XS+nyTAp/jcy/jOAwrZKY4rgz+5ZmKCI61NZ0iovaK7Jqo9qTM
    +iSjykZCamFhm4pg8itECD5FhnUetJ6axAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsG
    +AQUFBwMCMA0GCSqGSIb3DQEBBQUAA4IBAQDEQyFfY9WAkdnzb+vIlICgfkydceYx
    +KVJZ2WRMvrn2ZoRoSaK3CfGlz4nrCOgDjQxfX8OpKzudr/ghuBQCbDHHzxRrOen5
    +0Zig9Q+pxTZNrtds/SwX2dHJ7PVEwGxXXaKl8S19bNEdO0syFrRJU6I50ZbeEkJe
    +RI9IEFvBHcuG/GnEfqWj2ozI/+VhIOb4cTItg67ClmIPe8lteT2wj+/aydF9PKgF
    +QhooCe/G1nok1uiaGjo1HzFEn4HzI3s4mrolc8PpBBVsS+HckCOrHpRPWnYuCFEm
    +0yzS6tGaMrnITywwB2/uJ2aBAZIx2Go1zFhPf0YvFJc3e2x8cAuqBRLu
    +-----END CERTIFICATE-----
    diff --git a/src/prometheus/scrape/testdata/client.key b/src/prometheus/scrape/testdata/client.key
    new file mode 100644
    index 0000000..e584b7e
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/client.key
    @@ -0,0 +1,51 @@
    +-----BEGIN RSA PRIVATE KEY-----
    +MIIJKAIBAAKCAgEA4oEFfHflf34Esapj9wLb6VBhnOfXdnxkQOtQqm/mf3VADQH9
    +hkTeANKeTbKvLx3GanU0m1ebxaushBIGexMY4rDIjBHWByCBRg8P18LLLtPvLthv
    +MEbx2sRzGIr9ghbKKjR+lEJ9P1vb584R5j7T62AVX75eNtdXNdEIEyaxnbZw6bi3
    +a3sYcoFmFihSg+AqMOf7i3+uvSvWh3rJU6hxgKjpGu5AexY8ePpCVTxUa62QPs+m
    +8tL7DaX/omqZkMeVjqjqkOJ0LxqSnyL+3ek0GV7GTqy3/xhnlL2PouOoLptSd6tR
    +kHjRvovw5dfBZLdLnLq3TsDVAic+/YNkoZufxS6PtG9Lk4Bl0OfU/4zlpWVEuhht
    +1MckgusuD5vHIMiiSo41suCPvwAVPprTXd4kBkkGR6ho3sONt/93kWNENp0fZtfR
    +S6vcA+Hhxu/7QWjXcI89JyQD0RSjjaoGYotbKzznwPy9ErJL+ar8h5EvQAjAq5oQ
    +gm0+2LAJOeTo+8V9C87zTP8TKr669EGhlutYdv7cZHys3O7wZzVRtML6OcZi1tCH
    +HHX/udp7nZcZGmmWfA871xv6nHZl/zTaH6U/dIxj9dL6fJMCn+NzL+M4DCtkpjiu
    +DP7lmYoIjrU1nSKi9orsmqj2pMyJKPKRkJqYWGbimDyK0QIPkWGdR60nprECAwEA
    +AQKCAgEA18az1ERf9Fm33Q0GmE039IdnxlMy9qQ/2XyS5xsdCXVIZFvuClhW6Y+7
    +0ScVLpx95fLr/8SxF9mYymRlmh+ySFrDYnSnYTi9DmHQ5OmkKGMr64OyQNqFErSt
    +NMdMA/7z7sr9fv3sVUyMLMMqWB6oQgXRttki5bm1UgZlW+EzuZwQ6wbWbWTiAEt3
    +VkppeUo2x0poXxdu/rXhdEUrwC+qmTfQgaBQ+zFOwK0gPhTwE3hP/xZQ4+jL08+8
    +vRwyWTNZLYOLmiSxLCJzZXiwNfUwda7M2iw+SJ0WKCOBz1pzYJsFMA2b8Ta4EX89
    +Kailiu328UMK19Jp2dhLcLUYS8B2rVVAK5b/O6iKV8UpKTriXDiCKSpcugpsQ1ML
    +zq/6vR0SQXD+/W0MesGaNa33votBXJSsf9kZnYJw43n+W4Z/XFUE5pyNM/+TGAqw
    +yuF4FX2sJL1uP5VMOh2HdthTr+/ewx/Trn9/re0p54z83plVlp4qbcORLiQ2uDf6
    +ZZ0/gHzNTp4Fzz81ZvHLm9smpe8cLvojrKLvCl0hv5zAf3QtsajpTN9uM7AsshV1
    +QVZSuAxb5n9bcij5F2za1/dd7WLlvsSzgNJ4Td/gEDI8qepB0+7PGlJ17sMg0nWP
    +nFxUfGIsCF1KOoPwLyaNHHrRGjJigFUufqkbmSWkOzgC6pZVUXECggEBAP81To16
    +O5BlGDahcQkjKkqUwUtkhjE9/KQBh3zHqxsitI8f0U7eL3Ge1qhbgEgvHwHOjWSV
    +pcG9atE55b7qlqqGQboiO1jfyLfIVLfamj0fHLinO/pV/wcBNy6Hz4rP7DNJDCMz
    +0agz/Ys3VXrZIk5sO0sUBYMBxho1x0n65Z06iK1SwD/x4Xg3/Psyx+ujEEkSsv5I
    +Gg7aOTHLRSIPUx/OK+4M3sp58PeMGfEYNYxNiEoMiUQgu/srKRjs+pUKXCkEraNW
    +8s/ODYJ7iso6Z1z4NxfBH+hh+UrxTffh7t0Sz5gdUwUnBNb2I4EdeCcCTOnWYkut
    +/GKW8oHD7f9VDS0CggEBAOM06rrp9rSsl6UhTu8LS5bjBeyUxab4HLZKP5YBitQO
    +ltcPS05MxQ3UQ1BAMDRjXE2nrKlWMOAybrffEXBi4U1jYt7CiuCwwsPyaYNWT5qO
    +Iwdjebkeq3+Mh8c48swhOwRLWSGg6mtRoR/c5cthYU62+s2zdxc/yhVTQ0WNFabT
    +23PYtjjW41WuR6K7Dhrdcw0MwIs1arZHTsDdU6Hln9raTSNwlHMBWVz/tzuwLieQ
    +WEUXvsQvPtgPyohmDd0ueXiuS2FiYaXKFIMFj5/JyyJc1OCr1vIQN8mMcUjNbk2I
    +VaeeSPawgKIiYARhbjJtjwjY6D59gOZrNGYASQOTGhUCggEAJPOB8SgekbShgd90
    +L1+BExVgu1rNtzmDZ/e0t1Ntqdsni4WO172B3xChgfTlqQ3xjmBqxoKIYnnbinm4
    +kyECOaSAxcOJFkAonruJ0Kj9JhZoITBNldx3tXruk3UkjrO2PmK4OCybkaAdeNfF
    +L6lat0Iif6dheOt71HWu6j5CmrZL7dSKc3fBLpfksDZVDgApLntfoUOtSjM8jsIg
    +u2K+pV9Dqw7//w8S3bTSWL8pmavsLNSN12hp7177b1l4mrXKTEIaJglD1OS/vgHH
    +QaqdJq/lwjG7PflZkAlKQbbbz/SWTC8Kwzc4EyvGTj6HFBbYLg9VYiHJ5jh22mUV
    +A6A77QKCAQAM6DWpdp8QNnnK5LCCPecGZFEy1mTADno7FM6169KCJ24EO5cwlIXh
    +Ojy0s2DJqRdWRf82A3J1WggWI/Luqn9YERxNwUl4aDI4RW4fCuksw4RT6B/DF23w
    +qgAQnjiUxhJ/NPSUR3rpq9J2Z+sZ+ac4fIaU5uwOAw6s1XUN32zqdECUPSxk4Dg7
    +5tGk+fFcL1ZY2G+buOYeAsEDjc8xdET3fs1BBSU5v0rfUJuNJX4Ju1Z4Xlf09yYf
    +yg3cX8fL19cItwYLOzaG34r4wnkdP65tfk6NkNV+HNO+fF73Hsx0VRlgk0pb0T0N
    +eNxxg0NqU/T7MK9I1YJcFJz+ame7b0DdAoIBAFw3Sf9LbVVNh8ef4OqjBZR8RCYq
    +4HeG0FPYvMLzUtFi7j4uBfiL4+pNpSFvecSuLRKE8Pr5dPRJNPNgJud5gvuykBZX
    +Q9ktQJTAPZK8Q5neLeXfAdoF3szJuEZbDdGSps4JFokVIX+h3c+uFRD9QMSh+bz1
    +nEXCdYvmTs+bsTL+l7cbXq2iIKk1QnEcL+cRYr3VjP5xxZ/hGnuYqe9wmyo2MVkS
    +NVUmCifIvE34TO072HH49gVPrhj9qIZsfBh4LBpl75eKwXTXx+HFqHhP8OfzuK6U
    +v/JQn9JUGGzkmoMazQ9o5D5h/o0t/OGOPnQeqWL4BIPXdHv/dua6jLnAoU8=
    +-----END RSA PRIVATE KEY-----
    diff --git a/src/prometheus/scrape/testdata/server.cer b/src/prometheus/scrape/testdata/server.cer
    new file mode 100644
    index 0000000..8dcb154
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/server.cer
    @@ -0,0 +1,20 @@
    +-----BEGIN CERTIFICATE-----
    +MIIDSzCCAjOgAwIBAgIJAPn0lI/95RQVMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV
    +BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
    +Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4
    +MDQxNDE5MjRaFw00MjEyMjAxNDE5MjRaMFYxCzAJBgNVBAYTAlhYMRUwEwYDVQQH
    +DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEjAQ
    +BgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
    +AMQhH0walZlA+Gy5ZB3YzzxZta7mhTX3P+yBeQ6G6yrei4H7gv+MTCJj5qUBc+BS
    +cta8loKKUQWjoppjyh4tz8awkTD5sEyedE7/G3DS7mLgmx0PslwqrkXFBQhm/C2f
    +aZfSO69TZ8uu1dgCmmGe9K2XqPnR6fu9egtLpK8RT0s/Cx04bFnaPS0ecyj+3q7A
    +xzDsH84Z1KPo4LHgqNWlHqFsQPqH+7W9ajhF6lnO4ArEDJ3KuLDlgrENzCsDabls
    +0U2XsccBJzP+Ls+iQwMfKpx2ISQDHqniopSICw+sPufiAv+OGnnG6rGGWQjUstqf
    +w4DnU4DZvkrcEWoGa6fq26kCAwEAAaMTMBEwDwYDVR0RBAgwBocEfwAAATANBgkq
    +hkiG9w0BAQUFAAOCAQEAVPs8IZffawWuRqbXJSvFz7a1q95febWQFjvvMe8ZJeCZ
    +y1k9laQ5ZLHYuQ6NUWn09UbQNtK3fCLF4sJx5PCPCp1vZWx4nJs8N5mNyqdQ1Zfk
    +oyoYTOR2izNcIj6ZUFRoOR/7B9hl2JouCXrbExr96oO13xIfsdslScINz1X68oyW
    +KjU0yUrY+lWG1zEkUGXti9K6ujtXa7YY2n3nK/CvIqny5nVToYUgEMpjUR9S+KgN
    +JUtawY3VQKyp6ZXlHqa0ihsuvY9Hrlh14h0AsZchPAHUtDFv2nEQob/Kf1XynKw6
    +itVKcj/UFpkhsnc/19aP1gWje76fejXl0tzyPXDXFg==
    +-----END CERTIFICATE-----
    diff --git a/src/prometheus/scrape/testdata/server.key b/src/prometheus/scrape/testdata/server.key
    new file mode 100644
    index 0000000..2266b01
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/server.key
    @@ -0,0 +1,27 @@
    +-----BEGIN RSA PRIVATE KEY-----
    +MIIEpAIBAAKCAQEAxCEfTBqVmUD4bLlkHdjPPFm1ruaFNfc/7IF5DobrKt6LgfuC
    +/4xMImPmpQFz4FJy1ryWgopRBaOimmPKHi3PxrCRMPmwTJ50Tv8bcNLuYuCbHQ+y
    +XCquRcUFCGb8LZ9pl9I7r1Nny67V2AKaYZ70rZeo+dHp+716C0ukrxFPSz8LHThs
    +Wdo9LR5zKP7ersDHMOwfzhnUo+jgseCo1aUeoWxA+of7tb1qOEXqWc7gCsQMncq4
    +sOWCsQ3MKwNpuWzRTZexxwEnM/4uz6JDAx8qnHYhJAMeqeKilIgLD6w+5+IC/44a
    +ecbqsYZZCNSy2p/DgOdTgNm+StwRagZrp+rbqQIDAQABAoIBACeOjqNo0TdhtTko
    +gxrJ+bIwXcZy0/c4cPogeuwFJjU1QWnr8lXcVBazk3dAPcDGoEbTLoARqZm7kTYW
    +XlOL5dYrEn2QPpCVfNvZ9AzjXhUvO9m2qsCQEyobPJKfQslo14E5c7Q+3DZmgtbY
    +X47E4pCIgBoyzkBpzM2uaf6tPRLtv8QcLklcf7lP5rd0Zypc325RR6+J5nxfCoFp
    +fD3sj7t/lJLS8Xb6m4/YFjsVJ2qEAelZ086v8unMBEj324Vv/VqrkPFtFNJKI+Az
    +Pd9xFDBdsKijBn1Yam9/dj7CiyZYKaVZ9p/w7Oqkpbrt8J8S8OtNHZ4fz9FJgRu9
    +uu+VTikCgYEA5ZkDmozDseA/c9JTUGAiPfAt5OrnqlKQNzp2m19GKh+Mlwg4k6O5
    +uE+0vaQEfc0cX3o8qntWNsb63XC9h6oHewrdyVFMZNS4nzzmKEvGWt9ON6qfQDUs
    +1cgZ0Y/uKydDX/3hk/hnJbeRW429rk0/GTuSHHilBzhE0uXJ11xPG48CgYEA2q7a
    +yqTdqPmZFIAYT9ny099PhnGYE6cJljTUMX9Xhk4POqcigcq9kvNNsly2O1t0Eq0H
    +2tYo91xTCZc3Cb0N+Vx3meLIljnzhEtwzU9w6W5VGJHWiqovjGwtCdm/W28OlMzY
    +zM+0gVCJzZLhL0vOwBLwGUJvjgfpvgIb/W+C2UcCgYB5TJ3ayQOath7P0g6yKBfv
    +ITUd+/zovzXx97Ex5OPs3T4pjO5XEejMt0+F4WF+FR8oUiw65W5nAjkHRMjdI7dQ
    +Ci2ibpEttDTV7Bass1vYJqHsRvhbs7w8NbtuO9xYcCXoUPkcc+AKzTC+beQIckcj
    +zZUj9Zk6dz/lLAG3Bc3FgQKBgQC+MmZI6auAU9Y4ZlC+4qi4bfkUzaefMCC+a6RC
    +iKbvQOUt9j+k81h+fu6MuuYkKh6CP8wdITbwLXRrWwGbjrqgrzO2u/AJ+M07uwGZ
    +EAb8f+GzROR8JhjE4TEq6B/uvmDIOoI1YFF2Rz4TdjQ0lpJzrAT3czjjJy68+8is
    +XFhJ8QKBgQCMPpB7taMLQzuilEGabL6Xas9UxryiGoBHk4Umb107GVWgwXxWT6fk
    +YSlvbMQHCgVeaJe374Bghyw33Z3WilWM1fCWya/CxXlw9wakjQHiqFCIOCxdgosX
    +Sr35bRFWJMnHXD+jD0Vr8WrtbGzFSZb3ZrjT6WhWRIGCHcaMANN9ew==
    +-----END RSA PRIVATE KEY-----
    diff --git a/src/prometheus/scrape/testdata/servername.cer b/src/prometheus/scrape/testdata/servername.cer
    new file mode 100644
    index 0000000..d9c8a9e
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/servername.cer
    @@ -0,0 +1,20 @@
    +-----BEGIN CERTIFICATE-----
    +MIIDOzCCAiMCCQDU4khDjkOJSTANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJY
    +WDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBh
    +bnkgTHRkMRswGQYDVQQDDBJQcm9tZXRoZXVzIFRlc3QgQ0EwHhcNMTYwNTI2MjEx
    +MjU5WhcNNDMxMDEyMjExMjU5WjBgMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29t
    +ZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRkwFwYD
    +VQQDExBwcm9tZXRoZXVzLnJvY2tzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
    +CgKCAQEAylgjuph/zgi1A2KS0Iw/73zdom449Gw+DATApL1sKYUTAVBk0uDpEZIw
    +fuYEAz6RbzBgzdYw10cmDCDDb0lNTBF4V08bGAXcYeJkKnIHRZprTPs7PWAai1jE
    +0H6ph+ThuHghPku7OAeyTvYyt5i0jkU2vgLSPa9wLciCfvwtd6S1gsthfEl8YsKH
    +iEVE+5h4nLjzp8MIgGBNPhzQvwW8x6bp0whuVzOFRHR1VBeK5rxG0LbCVU3Q5oPV
    +SLuRTkjQ6vNtm/qZPTw2mALjpRUrNxbA453aE33foJHb3gF85bSt67F7glFww5sq
    +GtxTiju8t8gNy7UV0ROlkoC7o1pMswIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCr
    +Fp+7FSOrgZO9BdBwmsnpNiymaOdf4ydOAXt5fdqkmgAyoRY5hPhFGduAfgKxESTf
    +tf8dKPV82j0EQR8EOu4qqDhXaKeZ69ZWMEkmpafO0MMixZ2/CeTV+z9DydLOZ2cC
    +IFJihSiLNGh8E4AUFdujbWBcTdv4FafRAiEhQ98iMyYiKXC/wcFLkL/u5Lvhr8yw
    +LGuaKwheDy41Q9Vdb2xlPbgDdibMlvOGxP1AWbE+/0fmmncwr7oeF6b4+mpMEDJS
    +XCoX6MSBdDmo9Gw1yH6l4KrvAI+StLWWxK2qs8lkWzZjiNS+JPWDeNqJBRmG6Yxc
    +Fl2KpVLCjhcNehUvg23x
    +-----END CERTIFICATE-----
    diff --git a/src/prometheus/scrape/testdata/servername.key b/src/prometheus/scrape/testdata/servername.key
    new file mode 100644
    index 0000000..95d6aca
    --- /dev/null
    +++ b/src/prometheus/scrape/testdata/servername.key
    @@ -0,0 +1,27 @@
    +-----BEGIN RSA PRIVATE KEY-----
    +MIIEogIBAAKCAQEAylgjuph/zgi1A2KS0Iw/73zdom449Gw+DATApL1sKYUTAVBk
    +0uDpEZIwfuYEAz6RbzBgzdYw10cmDCDDb0lNTBF4V08bGAXcYeJkKnIHRZprTPs7
    +PWAai1jE0H6ph+ThuHghPku7OAeyTvYyt5i0jkU2vgLSPa9wLciCfvwtd6S1gsth
    +fEl8YsKHiEVE+5h4nLjzp8MIgGBNPhzQvwW8x6bp0whuVzOFRHR1VBeK5rxG0LbC
    +VU3Q5oPVSLuRTkjQ6vNtm/qZPTw2mALjpRUrNxbA453aE33foJHb3gF85bSt67F7
    +glFww5sqGtxTiju8t8gNy7UV0ROlkoC7o1pMswIDAQABAoIBADZ5vETEQcRKe9FJ
    +fJVA7QWg7FqKqjLD4YCC1wqDJNeYyCEWb86GVrkwTnYbnwDwm17/+0/vVn7e3NNv
    +Dq6rYXAVU/zNg1HYYhjIRodW47ZNeI3lJXHEqeDSKUqojyPS7yIm1WxcHy9agxrX
    +FZhwOEwFPlOxlsCcturcjKV7ZxJKftiWoyPodQLjlEmNoD/MQ6Obuge1dQZRLwCk
    +/R+EcTWHN4A+rpnZLoKFEaw5p7DTjdKSGOu+EFB+lrEg5kTOCN/kR0PYGnDH1Ygd
    +6/DmP0xiPpT2pKudTtI7f+QoPtff+GJ47Xy1oYks/cXUJiJbtCT9wyKQtR5mZRUc
    +ruNWBCECgYEA9e87HbUaMA4tAqaur684RTFAqpDjDBB8tDAxbnuQrv6947odgQHu
    +YcBAneL2HIvUMuusI0X52nGRwt+qOSXiS1WQwA1P44qR28VYxLIkgK1xMEpezClU
    +xIavMzwZtmjCZ84Q6H/qvVuqa5MuE4pe6O9vnb4cUWF280ngmf+zViUCgYEA0qAx
    +qzh6cUBSF6PAV+7QKXB4YLfvLloX3qwC+qkdaGjacREb7URxTKs1lHLhpmHwoPN+
    +aXccxNs443Z67AK68N2RAOVw3z1IPTmSUzL7HCKqzZtRXsj+Lm8bj9sRzvWuE7RU
    +X2QW+9ppAvjwwrhG0vXCs3yua2usMyHjr6ekw/cCgYBSut0qCyf6Dmq5v5R36PuG
    +2yCjwAWAo3Mvsh6OyeZL18nM92jBYwLrwx55fkXIKImDb6ACZaG9CAM+iLrcapAL
    +Q4dj85ZyNsUGJwbLdBmvZ6jx07K7/xNS4PPCym7j2625+anabF1swY88jNAtJpjy
    +xsjHSZKBFcZL5Qg3BbswOQKBgHigD/IMRWtot9scCAMUHRkudXKGxK9aH4OCJa6i
    +fdoW+st4TfMjmHOdNfFPndWpD6NN8B68fbhsCHeUmi9iHOfnLK1DudHQCfguaZPG
    +hbOGUyWvhvluyMuVDEbl4pwRbeGRDCUZcGRKoIt4QIJ0APO+lgQvKsEQiC08gmZN
    +73nfAoGAKXVVV7dN59gohMTRWsOSGP+YLEj8+rGZZYNKCLVTol0VQ7T30tA0P4Cf
    +Dw9oLKGnDdgTtJA6Fsms858B6ANC+6Hxd9LG0ecOevKMBFHuWPm56Z0ofDzoPVBW
    +eDuHeR5xF0xq5PIFl/mIJJ1NK0p1Do9gwqEEIftdNyrcGefGdXk=
    +-----END RSA PRIVATE KEY-----
    diff --git a/src/prometheus/scripts/genproto.sh b/src/prometheus/scripts/genproto.sh
    new file mode 100755
    index 0000000..a46eedc
    --- /dev/null
    +++ b/src/prometheus/scripts/genproto.sh
    @@ -0,0 +1,49 @@
    +#!/usr/bin/env bash
    +#
    +# Generate all protobuf bindings.
    +# Run from repository root.
    +set -e
    +set -u
    +
    +if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
    +	echo "must be run from repository root"
    +	exit 255
    +fi
    +
    +if ! [[ $(protoc --version) =~ "3.5" ]]; then
    +	echo "could not find protoc 3.5.x, is it installed + in PATH?"
    +	exit 255
    +fi
    +
    +PROM_ROOT="${GOPATH}/src/github.com/prometheus/prometheus"
    +PROM_PATH="${PROM_ROOT}/prompb"
    +GOGOPROTO_ROOT="${GOPATH}/src/github.com/gogo/protobuf"
    +GOGOPROTO_PATH="${GOGOPROTO_ROOT}:${GOGOPROTO_ROOT}/protobuf"
    +GRPC_GATEWAY_ROOT="${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway"
    +
    +DIRS="prompb"
    +
    +for dir in ${DIRS}; do
    +	pushd ${dir}
    +		protoc --gogofast_out=plugins=grpc:. -I=. \
    +            -I="${GOGOPROTO_PATH}" \
    +            -I="${PROM_PATH}" \
    +            -I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
    +            *.proto
    +
    +		protoc -I. \
    +			-I="${GOGOPROTO_PATH}" \
    +			-I="${PROM_PATH}" \
    +			-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
    +			--grpc-gateway_out=logtostderr=true:. \
    +			--swagger_out=logtostderr=true:../documentation/dev/api/ \
    +			rpc.proto
    +		mv ../documentation/dev/api/rpc.swagger.json ../documentation/dev/api/swagger.json
    +		
    +		sed -i.bak -E 's/import _ \"gogoproto\"//g' *.pb.go
    +		sed -i.bak -E 's/import _ \"google\/protobuf\"//g' *.pb.go
    +		sed -i.bak -E 's/golang\/protobuf/gogo\/protobuf/g' *.go
    +		rm -f *.bak
    +		goimports -w *.pb.go
    +	popd
    +done
    diff --git a/src/prometheus/storage/buffer.go b/src/prometheus/storage/buffer.go
    new file mode 100644
    index 0000000..7df4027
    --- /dev/null
    +++ b/src/prometheus/storage/buffer.go
    @@ -0,0 +1,237 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package storage
    +
    +import (
    +	"math"
    +)
    +
    +// BufferedSeriesIterator wraps an iterator with a look-back buffer.
    +// It retains the samples seen within the last delta of the current
    +// position so callers can iterate over them again via Buffer().
    +type BufferedSeriesIterator struct {
    +	it  SeriesIterator
    +	buf *sampleRing
    +
    +	// lastTime is the timestamp of the sample the underlying iterator
    +	// currently points at; ok turns false once it is exhausted.
    +	lastTime int64
    +	ok       bool
    +}
    +
    +// NewBuffer returns a new iterator that buffers the values within the time range
    +// of the current element and the duration of delta before.
    +func NewBuffer(it SeriesIterator, delta int64) *BufferedSeriesIterator {
    +	bit := &BufferedSeriesIterator{
    +		buf: newSampleRing(delta, 16),
    +	}
    +	bit.Reset(it)
    +
    +	return bit
    +}
    +
    +// Reset re-uses the buffer with a new iterator. All buffered samples are
    +// discarded and the new iterator is advanced to its first sample.
    +func (b *BufferedSeriesIterator) Reset(it SeriesIterator) {
    +	b.it = it
    +	b.lastTime = math.MinInt64
    +	b.ok = true
    +	b.buf.reset()
    +	it.Next()
    +}
    +
    +// PeekBack returns the nth previous element of the iterator. If there is none buffered,
    +// ok is false.
    +func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, ok bool) {
    +	return b.buf.nthLast(n)
    +}
    +
    +// Buffer returns an iterator over the buffered data. Invalidates previously
    +// returned iterators.
    +func (b *BufferedSeriesIterator) Buffer() SeriesIterator {
    +	return b.buf.iterator()
    +}
    +
    +// Seek advances the iterator to the element at time t or greater.
    +// It seeks the underlying iterator to t minus the look-back delta so the
    +// buffer still covers the full window ending at t.
    +func (b *BufferedSeriesIterator) Seek(t int64) bool {
    +	t0 := t - b.buf.delta
    +
    +	// If the delta would cause us to seek backwards, preserve the buffer
    +	// and just continue regular advancement while filling the buffer on the way.
    +	if t0 > b.lastTime {
    +		b.buf.reset()
    +
    +		b.ok = b.it.Seek(t0)
    +		if !b.ok {
    +			return false
    +		}
    +		b.lastTime, _ = b.Values()
    +	}
    +
    +	if b.lastTime >= t {
    +		return true
    +	}
    +	for b.Next() {
    +		if b.lastTime >= t {
    +			return true
    +		}
    +	}
    +
    +	return false
    +}
    +
    +// Next advances the iterator to the next element.
    +func (b *BufferedSeriesIterator) Next() bool {
    +	if !b.ok {
    +		return false
    +	}
    +
    +	// Add current element to buffer before advancing.
    +	b.buf.add(b.it.At())
    +
    +	b.ok = b.it.Next()
    +	if b.ok {
    +		b.lastTime, _ = b.Values()
    +	}
    +
    +	return b.ok
    +}
    +
    +// Values returns the current element of the iterator.
    +func (b *BufferedSeriesIterator) Values() (int64, float64) {
    +	return b.it.At()
    +}
    +
    +// Err returns the last encountered error.
    +func (b *BufferedSeriesIterator) Err() error {
    +	return b.it.Err()
    +}
    +
    +// sample is a single timestamp/value pair.
    +type sample struct {
    +	t int64
    +	v float64
    +}
    +
    +// sampleRing is a ring buffer of samples that retains the samples added
    +// within the last delta of the most recently added timestamp.
    +type sampleRing struct {
    +	delta int64
    +
    +	buf []sample // lookback buffer
    +	i   int      // position of most recent element in ring buffer
    +	f   int      // position of first element in ring buffer
    +	l   int      // number of elements in buffer
    +
    +	// it is a reusable iterator so iterator() does not allocate per call.
    +	it sampleRingIterator
    +}
    +
    +// newSampleRing returns an empty sample ring with look-back range delta and
    +// an initial capacity of sz samples.
    +func newSampleRing(delta int64, sz int) *sampleRing {
    +	r := &sampleRing{delta: delta, buf: make([]sample, sz)}
    +	r.reset()
    +
    +	return r
    +}
    +
    +// reset empties the ring; the backing array is retained for reuse.
    +func (r *sampleRing) reset() {
    +	r.l = 0
    +	r.i = -1
    +	r.f = 0
    +}
    +
    +// Returns the current iterator. Invalidates previously returned iterators.
    +func (r *sampleRing) iterator() SeriesIterator {
    +	r.it.r = r
    +	r.it.i = -1
    +	return &r.it
    +}
    +
    +// sampleRingIterator implements SeriesIterator over the contents of a
    +// sampleRing, iterating from the oldest to the newest buffered sample.
    +type sampleRingIterator struct {
    +	r *sampleRing
    +	i int
    +}
    +
    +func (it *sampleRingIterator) Next() bool {
    +	it.i++
    +	return it.i < it.r.l
    +}
    +
    +// Seek is not supported on the buffer iterator and always reports failure.
    +func (it *sampleRingIterator) Seek(int64) bool {
    +	return false
    +}
    +
    +func (it *sampleRingIterator) Err() error {
    +	return nil
    +}
    +
    +func (it *sampleRingIterator) At() (int64, float64) {
    +	return it.r.at(it.i)
    +}
    +
    +// at returns the sample at logical position i, where 0 is the oldest
    +// buffered sample.
    +func (r *sampleRing) at(i int) (int64, float64) {
    +	j := (r.f + i) % len(r.buf)
    +	s := r.buf[j]
    +	return s.t, s.v
    +}
    +
    +// add adds a sample to the ring buffer and frees all samples that fall
    +// out of the delta range. Samples are expected to be added with
    +// non-decreasing timestamps.
    +func (r *sampleRing) add(t int64, v float64) {
    +	l := len(r.buf)
    +	// Grow the ring buffer if it fits no more elements.
    +	if l == r.l {
    +		// Double the capacity, unwrapping the ring: the element formerly at
    +		// r.f moves to l+r.f in the new buffer, so r.f is shifted by l.
    +		buf := make([]sample, 2*l)
    +		copy(buf[l+r.f:], r.buf[r.f:])
    +		copy(buf, r.buf[:r.f])
    +
    +		r.buf = buf
    +		r.i = r.f
    +		r.f += l
    +		l = 2 * l
    +	} else {
    +		r.i++
    +		if r.i >= l {
    +			r.i -= l
    +		}
    +	}
    +
    +	r.buf[r.i] = sample{t: t, v: v}
    +	r.l++
    +
    +	// Free head of the buffer of samples that just fell out of the range.
    +	for r.buf[r.f].t < t-r.delta {
    +		r.f++
    +		if r.f >= l {
    +			r.f -= l
    +		}
    +		r.l--
    +	}
    +}
    +
    +// nthLast returns the nth most recent element added to the ring; n == 1 is
    +// the most recent sample.
    +// NOTE(review): n <= 0 is not guarded and would read past the newest slot —
    +// callers appear to pass n >= 1; confirm before relying on other values.
    +func (r *sampleRing) nthLast(n int) (int64, float64, bool) {
    +	if n > r.l {
    +		return 0, 0, false
    +	}
    +	t, v := r.at(r.l - n)
    +	return t, v, true
    +}
    +
    +// samples returns a copy of all buffered samples in order of addition.
    +func (r *sampleRing) samples() []sample {
    +	res := make([]sample, r.l)
    +
    +	// k is the exclusive end of the first contiguous segment starting at
    +	// r.f; j is the length of the wrapped-around tail at the start of buf.
    +	var k = r.f + r.l
    +	var j int
    +	if k > len(r.buf) {
    +		k = len(r.buf)
    +		j = r.l - k + r.f
    +	}
    +
    +	n := copy(res, r.buf[r.f:k])
    +	copy(res[n:], r.buf[:j])
    +
    +	return res
    +}
    diff --git a/src/prometheus/storage/buffer_test.go b/src/prometheus/storage/buffer_test.go
    new file mode 100644
    index 0000000..5b752de
    --- /dev/null
    +++ b/src/prometheus/storage/buffer_test.go
    @@ -0,0 +1,257 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package storage
    +
    +import (
    +	"math/rand"
    +	"sort"
    +	"testing"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/stretchr/testify/require"
    +)
    +
    +// TestSampleRing adds samples one by one and checks after each addition
    +// that exactly the samples within the delta window are retained.
    +func TestSampleRing(t *testing.T) {
    +	cases := []struct {
    +		input []int64
    +		delta int64
    +		size  int
    +	}{
    +		{
    +			input: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
    +			delta: 2,
    +			size:  1,
    +		},
    +		{
    +			input: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
    +			delta: 2,
    +			size:  2,
    +		},
    +		{
    +			input: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
    +			delta: 7,
    +			size:  3,
    +		},
    +		{
    +			input: []int64{1, 2, 3, 4, 5, 16, 17, 18, 19, 20},
    +			delta: 7,
    +			size:  1,
    +		},
    +		{
    +			input: []int64{1, 2, 3, 4, 6},
    +			delta: 4,
    +			size:  4,
    +		},
    +	}
    +	for _, c := range cases {
    +		r := newSampleRing(c.delta, c.size)
    +
    +		// Note: the loop variable t here shadows *testing.T only within
    +		// this loop body.
    +		input := []sample{}
    +		for _, t := range c.input {
    +			input = append(input, sample{
    +				t: t,
    +				v: float64(rand.Intn(100)),
    +			})
    +		}
    +
    +		for i, s := range input {
    +			r.add(s.t, s.v)
    +			buffered := r.samples()
    +
    +			for _, sold := range input[:i] {
    +				found := false
    +				for _, bs := range buffered {
    +					if bs.t == sold.t && bs.v == sold.v {
    +						found = true
    +						break
    +					}
    +				}
    +				if sold.t >= s.t-c.delta && !found {
    +					t.Fatalf("%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered)
    +				}
    +				if sold.t < s.t-c.delta && found {
    +					t.Fatalf("%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered)
    +				}
    +			}
    +		}
    +	}
    +}
    +
    +// TestBufferedSeriesIterator verifies Next/Seek behavior and that the
    +// look-back buffer contains exactly the samples within delta of the
    +// current position.
    +func TestBufferedSeriesIterator(t *testing.T) {
    +	var it *BufferedSeriesIterator
    +
    +	// bufferEq asserts the buffered window equals exp.
    +	bufferEq := func(exp []sample) {
    +		var b []sample
    +		bit := it.Buffer()
    +		for bit.Next() {
    +			t, v := bit.At()
    +			b = append(b, sample{t: t, v: v})
    +		}
    +		require.Equal(t, exp, b, "buffer mismatch")
    +	}
    +	// sampleEq asserts the current sample equals (ets, ev).
    +	sampleEq := func(ets int64, ev float64) {
    +		ts, v := it.Values()
    +		require.Equal(t, ets, ts, "timestamp mismatch")
    +		require.Equal(t, ev, v, "value mismatch")
    +	}
    +
    +	it = NewBuffer(newListSeriesIterator([]sample{
    +		{t: 1, v: 2},
    +		{t: 2, v: 3},
    +		{t: 3, v: 4},
    +		{t: 4, v: 5},
    +		{t: 5, v: 6},
    +		{t: 99, v: 8},
    +		{t: 100, v: 9},
    +		{t: 101, v: 10},
    +	}), 2)
    +
    +	require.True(t, it.Seek(-123), "seek failed")
    +	sampleEq(1, 2)
    +	bufferEq(nil)
    +
    +	require.True(t, it.Next(), "next failed")
    +	sampleEq(2, 3)
    +	bufferEq([]sample{{t: 1, v: 2}})
    +
    +	require.True(t, it.Next(), "next failed")
    +	require.True(t, it.Next(), "next failed")
    +	require.True(t, it.Next(), "next failed")
    +	sampleEq(5, 6)
    +	bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
    +
    +	require.True(t, it.Seek(5), "seek failed")
    +	sampleEq(5, 6)
    +	bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
    +
    +	require.True(t, it.Seek(101), "seek failed")
    +	sampleEq(101, 10)
    +	bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
    +
    +	require.False(t, it.Next(), "next succeeded unexpectedly")
    +}
    +
    +// At() should not be called once Next() returns false. The mock fails the
    +// test if At() is invoked a second time after the iterator is exhausted.
    +func TestBufferedSeriesIteratorNoBadAt(t *testing.T) {
    +	done := false
    +
    +	m := &mockSeriesIterator{
    +		seek: func(int64) bool { return false },
    +		at: func() (int64, float64) {
    +			require.False(t, done)
    +			done = true
    +			return 0, 0
    +		},
    +		next: func() bool { return !done },
    +		err:  func() error { return nil },
    +	}
    +
    +	it := NewBuffer(m, 60)
    +	it.Next()
    +	it.Next()
    +}
    +
    +// BenchmarkBufferedSeriesIterator measures a full scan through b.N
    +// buffered samples with a 5-minute look-back window.
    +func BenchmarkBufferedSeriesIterator(b *testing.B) {
    +	var (
    +		samples []sample
    +		lastT   int64
    +	)
    +	// One synthetic sample per benchmark iteration, 30 time units apart.
    +	for i := 0; i < b.N; i++ {
    +		lastT += 30
    +
    +		samples = append(samples, sample{
    +			t: lastT,
    +			v: 123, // doesn't matter
    +		})
    +	}
    +
    +	// Simulate a 5 minute rate.
    +	it := NewBuffer(newListSeriesIterator(samples), 5*60)
    +
    +	// NOTE(review): SetBytes expects bytes processed per iteration; passing
    +	// b.N*16 makes the MB/s figure scale with b.N — confirm whether 16 was
    +	// intended here.
    +	b.SetBytes(int64(b.N * 16))
    +	b.ReportAllocs()
    +	b.ResetTimer()
    +
    +	for it.Next() {
    +		// scan everything
    +	}
    +	require.NoError(b, it.Err())
    +}
    +
    +// mockSeriesIterator delegates each SeriesIterator method to a
    +// caller-supplied closure, for instrumenting tests.
    +type mockSeriesIterator struct {
    +	seek func(int64) bool
    +	at   func() (int64, float64)
    +	next func() bool
    +	err  func() error
    +}
    +
    +func (m *mockSeriesIterator) Seek(t int64) bool    { return m.seek(t) }
    +func (m *mockSeriesIterator) At() (int64, float64) { return m.at() }
    +func (m *mockSeriesIterator) Next() bool           { return m.next() }
    +func (m *mockSeriesIterator) Err() error           { return m.err() }
    +
    +// mockSeries delegates Series methods to caller-supplied closures.
    +type mockSeries struct {
    +	labels   func() labels.Labels
    +	iterator func() SeriesIterator
    +}
    +
    +// newMockSeries returns a Series with fixed labels backed by an in-memory
    +// sample list.
    +func newMockSeries(lset labels.Labels, samples []sample) Series {
    +	return &mockSeries{
    +		labels: func() labels.Labels {
    +			return lset
    +		},
    +		iterator: func() SeriesIterator {
    +			return newListSeriesIterator(samples)
    +		},
    +	}
    +}
    +
    +func (m *mockSeries) Labels() labels.Labels    { return m.labels() }
    +func (m *mockSeries) Iterator() SeriesIterator { return m.iterator() }
    +
    +// listSeriesIterator iterates an in-memory, time-ordered sample slice.
    +type listSeriesIterator struct {
    +	list []sample
    +	idx  int
    +}
    +
    +func newListSeriesIterator(list []sample) *listSeriesIterator {
    +	return &listSeriesIterator{list: list, idx: -1}
    +}
    +
    +func (it *listSeriesIterator) At() (int64, float64) {
    +	s := it.list[it.idx]
    +	return s.t, s.v
    +}
    +
    +func (it *listSeriesIterator) Next() bool {
    +	it.idx++
    +	return it.idx < len(it.list)
    +}
    +
    +// Seek only searches forward from the current position, never backwards.
    +func (it *listSeriesIterator) Seek(t int64) bool {
    +	if it.idx == -1 {
    +		it.idx = 0
    +	}
    +	// Do binary search between current position and end.
    +	it.idx = sort.Search(len(it.list)-it.idx, func(i int) bool {
    +		s := it.list[i+it.idx]
    +		return s.t >= t
    +	})
    +
    +	return it.idx < len(it.list)
    +}
    +
    +func (it *listSeriesIterator) Err() error {
    +	return nil
    +}
    diff --git a/src/prometheus/storage/fanout.go b/src/prometheus/storage/fanout.go
    new file mode 100644
    index 0000000..3ab9943
    --- /dev/null
    +++ b/src/prometheus/storage/fanout.go
    @@ -0,0 +1,499 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package storage
    +
    +import (
    +	"container/heap"
    +	"context"
    +	"strings"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/go-kit/kit/log/level"
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +)
    +
    +// fanout proxies reads and writes to one primary and any number of
    +// secondary storages.
    +type fanout struct {
    +	logger log.Logger
    +
    +	primary     Storage
    +	secondaries []Storage
    +}
    +
    +// NewFanout returns a new fan-out Storage, which proxies reads and writes
    +// through to multiple underlying storages.
    +func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage {
    +	return &fanout{
    +		logger:      logger,
    +		primary:     primary,
    +		secondaries: secondaries,
    +	}
    +}
    +
    +// StartTime implements the Storage interface.
    +func (f *fanout) StartTime() (int64, error) {
    +	// StartTime of a fanout should be the earliest StartTime of all its storages,
    +	// both primary and secondaries.
    +	firstTime, err := f.primary.StartTime()
    +	if err != nil {
    +		return int64(model.Latest), err
    +	}
    +
    +	for _, storage := range f.secondaries {
    +		t, err := storage.StartTime()
    +		if err != nil {
    +			return int64(model.Latest), err
    +		}
    +		if t < firstTime {
    +			firstTime = t
    +		}
    +	}
    +	return firstTime, nil
    +}
    +
    +// Querier returns a merge querier over the primary and all secondaries for
    +// the given time range.
    +func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
    +	queriers := make([]Querier, 0, 1+len(f.secondaries))
    +
    +	// Add primary querier
    +	querier, err := f.primary.Querier(ctx, mint, maxt)
    +	if err != nil {
    +		return nil, err
    +	}
    +	queriers = append(queriers, querier)
    +
    +	// Add secondary queriers
    +	for _, storage := range f.secondaries {
    +		querier, err := storage.Querier(ctx, mint, maxt)
    +		if err != nil {
    +			// Close the queriers created so far before bailing out.
    +			NewMergeQuerier(queriers).Close()
    +			return nil, err
    +		}
    +		queriers = append(queriers, querier)
    +	}
    +
    +	return NewMergeQuerier(queriers), nil
    +}
    +
    +// Appender returns a fanoutAppender writing to the primary and all
    +// secondary appenders.
    +// NOTE(review): if a secondary Appender() call fails, appenders created
    +// earlier in the loop are simply dropped without rollback.
    +func (f *fanout) Appender() (Appender, error) {
    +	primary, err := f.primary.Appender()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	secondaries := make([]Appender, 0, len(f.secondaries))
    +	for _, storage := range f.secondaries {
    +		appender, err := storage.Appender()
    +		if err != nil {
    +			return nil, err
    +		}
    +		secondaries = append(secondaries, appender)
    +	}
    +	return &fanoutAppender{
    +		logger:      f.logger,
    +		primary:     primary,
    +		secondaries: secondaries,
    +	}, nil
    +}
    +
    +// Close closes the storage and all its underlying resources.
    +// A primary close error aborts immediately; secondary close errors are
    +// collapsed into the last one observed.
    +func (f *fanout) Close() error {
    +	if err := f.primary.Close(); err != nil {
    +		return err
    +	}
    +
    +	// TODO return multiple errors?
    +	var lastErr error
    +	for _, storage := range f.secondaries {
    +		if err := storage.Close(); err != nil {
    +			lastErr = err
    +		}
    +	}
    +	return lastErr
    +}
    +
    +// fanoutAppender implements Appender, forwarding every sample to the
    +// primary appender and all secondary appenders.
    +type fanoutAppender struct {
    +	logger log.Logger
    +
    +	primary     Appender
    +	secondaries []Appender
    +}
    +
    +// Add appends a sample to the primary first and then to each secondary.
    +// The returned ref comes from the primary appender only.
    +func (f *fanoutAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
    +	ref, err := f.primary.Add(l, t, v)
    +	if err != nil {
    +		return ref, err
    +	}
    +
    +	for _, appender := range f.secondaries {
    +		if _, err := appender.Add(l, t, v); err != nil {
    +			return 0, err
    +		}
    +	}
    +	return ref, nil
    +}
    +
    +// AddFast appends by reference on the primary; secondaries fall back to
    +// Add — presumably because the ref is only meaningful to the primary
    +// appender that issued it (confirm against the Appender contract).
    +func (f *fanoutAppender) AddFast(l labels.Labels, ref uint64, t int64, v float64) error {
    +	if err := f.primary.AddFast(l, ref, t, v); err != nil {
    +		return err
    +	}
    +
    +	for _, appender := range f.secondaries {
    +		if _, err := appender.Add(l, t, v); err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +// Commit commits the primary first, then the secondaries. If any commit
    +// fails, all remaining secondaries are rolled back instead, with rollback
    +// errors logged and squashed.
    +func (f *fanoutAppender) Commit() (err error) {
    +	err = f.primary.Commit()
    +
    +	for _, appender := range f.secondaries {
    +		if err == nil {
    +			err = appender.Commit()
    +		} else {
    +			if rollbackErr := appender.Rollback(); rollbackErr != nil {
    +				level.Error(f.logger).Log("msg", "Squashed rollback error on commit", "err", rollbackErr)
    +			}
    +		}
    +	}
    +	return
    +}
    +
    +// Rollback rolls back the primary and all secondary appenders. The primary
    +// rollback error takes precedence; otherwise the first secondary rollback
    +// error is returned and any further errors are logged and squashed.
    +func (f *fanoutAppender) Rollback() (err error) {
    +	err = f.primary.Rollback()
    +
    +	for _, appender := range f.secondaries {
    +		rollbackErr := appender.Rollback()
    +		if err == nil {
    +			err = rollbackErr
    +		} else if rollbackErr != nil {
    +			level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr)
    +		}
    +	}
    +	// Previously `return nil` discarded the accumulated error, silently
    +	// swallowing rollback failures; return the named result instead.
    +	return err
    +}
    +
    +// mergeQuerier implements Querier by fanning a request out to several
    +// queriers and merging their results.
    +type mergeQuerier struct {
    +	queriers []Querier
    +}
    +
    +// NewMergeQuerier returns a new Querier that merges results of input queriers.
    +// NB NewMergeQuerier will return NoopQuerier if no queriers are passed to it,
    +// and will filter NoopQueriers from its arguments, in order to reduce overhead
    +// when only one querier is passed.
    +func NewMergeQuerier(queriers []Querier) Querier {
    +	filtered := make([]Querier, 0, len(queriers))
    +	for _, querier := range queriers {
    +		if querier != NoopQuerier() {
    +			filtered = append(filtered, querier)
    +		}
    +	}
    +
    +	// Avoid the merging layer entirely when zero or one querier remains.
    +	switch len(filtered) {
    +	case 0:
    +		return NoopQuerier()
    +	case 1:
    +		return filtered[0]
    +	default:
    +		return &mergeQuerier{
    +			queriers: filtered,
    +		}
    +	}
    +}
    +
    +// Select returns a set of series that matches the given label matchers.
    +// The per-querier sets are combined with a deduplicating merge.
    +func (q *mergeQuerier) Select(params *SelectParams, matchers ...*labels.Matcher) (SeriesSet, error) {
    +	seriesSets := make([]SeriesSet, 0, len(q.queriers))
    +	for _, querier := range q.queriers {
    +		set, err := querier.Select(params, matchers...)
    +		if err != nil {
    +			return nil, err
    +		}
    +		seriesSets = append(seriesSets, set)
    +	}
    +	return NewMergeSeriesSet(seriesSets), nil
    +}
    +
    +// LabelValues returns all potential values for a label name, merged and
    +// deduplicated across all underlying queriers.
    +func (q *mergeQuerier) LabelValues(name string) ([]string, error) {
    +	var results [][]string
    +	for _, querier := range q.queriers {
    +		values, err := querier.LabelValues(name)
    +		if err != nil {
    +			return nil, err
    +		}
    +		results = append(results, values)
    +	}
    +	return mergeStringSlices(results), nil
    +}
    +
    +// mergeStringSlices merges any number of sorted string slices into one
    +// sorted, deduplicated slice by recursively splitting the work into
    +// pairwise merges.
    +func mergeStringSlices(ss [][]string) []string {
    +	switch len(ss) {
    +	case 0:
    +		return nil
    +	case 1:
    +		return ss[0]
    +	case 2:
    +		return mergeTwoStringSlices(ss[0], ss[1])
    +	default:
    +		halfway := len(ss) / 2
    +		return mergeTwoStringSlices(
    +			mergeStringSlices(ss[:halfway]),
    +			mergeStringSlices(ss[halfway:]),
    +		)
    +	}
    +}
    +
    +// mergeTwoStringSlices merges two sorted string slices into one sorted
    +// slice, keeping a single copy of values present in both.
    +func mergeTwoStringSlices(a, b []string) []string {
    +	i, j := 0, 0
    +	result := make([]string, 0, len(a)+len(b))
    +	for i < len(a) && j < len(b) {
    +		switch strings.Compare(a[i], b[j]) {
    +		case 0:
    +			// Equal values are emitted once.
    +			result = append(result, a[i])
    +			i++
    +			j++
    +		case -1:
    +			result = append(result, a[i])
    +			i++
    +		case 1:
    +			result = append(result, b[j])
    +			j++
    +		}
    +	}
    +	// Append whichever input still has a tail left.
    +	result = append(result, a[i:]...)
    +	result = append(result, b[j:]...)
    +	return result
    +}
    +
    +// Close releases the resources of the Querier. All underlying queriers are
    +// closed; only the last error is reported.
    +func (q *mergeQuerier) Close() error {
    +	// TODO return multiple errors?
    +	var lastErr error
    +	for _, querier := range q.queriers {
    +		if err := querier.Close(); err != nil {
    +			lastErr = err
    +		}
    +	}
    +	return lastErr
    +}
    +
    +// mergeSeriesSet implements SeriesSet by performing a k-way merge over the
    +// input sets, grouping series with equal label sets under one cursor.
    +type mergeSeriesSet struct {
    +	currentLabels labels.Labels
    +	currentSets   []SeriesSet
    +	heap          seriesSetHeap
    +	sets          []SeriesSet
    +}
    +
    +// NewMergeSeriesSet returns a new series set that merges (deduplicates)
    +// series returned by the input series sets when iterating.
    +func NewMergeSeriesSet(sets []SeriesSet) SeriesSet {
    +	if len(sets) == 1 {
    +		return sets[0]
    +	}
    +
    +	// Sets need to be pre-advanced, so we can introspect the label of the
    +	// series under the cursor.
    +	var h seriesSetHeap
    +	for _, set := range sets {
    +		if set.Next() {
    +			heap.Push(&h, set)
    +		}
    +	}
    +	return &mergeSeriesSet{
    +		heap: h,
    +		sets: sets,
    +	}
    +}
    +
    +// Next advances the cursor to the next distinct label set.
    +func (c *mergeSeriesSet) Next() bool {
    +	// Firstly advance all the current series sets.  If any of them have run out
    +	// we can drop them, otherwise they should be inserted back into the heap.
    +	for _, set := range c.currentSets {
    +		if set.Next() {
    +			heap.Push(&c.heap, set)
    +		}
    +	}
    +	if len(c.heap) == 0 {
    +		return false
    +	}
    +
    +	// Now, pop items of the heap that have equal label sets.
    +	c.currentSets = nil
    +	c.currentLabels = c.heap[0].At().Labels()
    +	for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) {
    +		set := heap.Pop(&c.heap).(SeriesSet)
    +		c.currentSets = append(c.currentSets, set)
    +	}
    +	return true
    +}
    +
    +// At returns the series under the cursor; series from several sets that
    +// share a label set are wrapped into a single mergeSeries.
    +func (c *mergeSeriesSet) At() Series {
    +	if len(c.currentSets) == 1 {
    +		return c.currentSets[0].At()
    +	}
    +	series := []Series{}
    +	for _, seriesSet := range c.currentSets {
    +		series = append(series, seriesSet.At())
    +	}
    +	return &mergeSeries{
    +		labels: c.currentLabels,
    +		series: series,
    +	}
    +}
    +
    +// Err returns the first error encountered by any of the input sets.
    +func (c *mergeSeriesSet) Err() error {
    +	for _, set := range c.sets {
    +		if err := set.Err(); err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +// seriesSetHeap is a min-heap of series sets, ordered by the label set of
    +// the series each set currently points at.
    +type seriesSetHeap []SeriesSet
    +
    +func (h seriesSetHeap) Len() int      { return len(h) }
    +func (h seriesSetHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
    +
    +func (h seriesSetHeap) Less(i, j int) bool {
    +	a, b := h[i].At().Labels(), h[j].At().Labels()
    +	return labels.Compare(a, b) < 0
    +}
    +
    +func (h *seriesSetHeap) Push(x interface{}) {
    +	*h = append(*h, x.(SeriesSet))
    +}
    +
    +func (h *seriesSetHeap) Pop() interface{} {
    +	old := *h
    +	n := len(old)
    +	x := old[n-1]
    +	*h = old[0 : n-1]
    +	return x
    +}
    +
    +// mergeSeries bundles several series with identical label sets into one
    +// logical series.
    +type mergeSeries struct {
    +	labels labels.Labels
    +	series []Series
    +}
    +
    +func (m *mergeSeries) Labels() labels.Labels {
    +	return m.labels
    +}
    +
    +// Iterator returns a merging iterator over the samples of all bundled
    +// series.
    +func (m *mergeSeries) Iterator() SeriesIterator {
    +	iterators := make([]SeriesIterator, 0, len(m.series))
    +	for _, s := range m.series {
    +		iterators = append(iterators, s.Iterator())
    +	}
    +	return newMergeIterator(iterators)
    +}
    +
    +// mergeIterator performs a k-way merge over sample iterators, deduplicating
    +// samples that share a timestamp. h is nil until the first Next/Seek.
    +type mergeIterator struct {
    +	iterators []SeriesIterator
    +	h         seriesIteratorHeap
    +}
    +
    +func newMergeIterator(iterators []SeriesIterator) SeriesIterator {
    +	return &mergeIterator{
    +		iterators: iterators,
    +		h:         nil,
    +	}
    +}
    +
    +// Seek positions every input at time t or later and rebuilds the heap from
    +// the iterators that still have data.
    +func (c *mergeIterator) Seek(t int64) bool {
    +	c.h = seriesIteratorHeap{}
    +	for _, iter := range c.iterators {
    +		if iter.Seek(t) {
    +			heap.Push(&c.h, iter)
    +		}
    +	}
    +	return len(c.h) > 0
    +}
    +
    +// At returns the earliest sample among the inputs' current positions.
    +func (c *mergeIterator) At() (t int64, v float64) {
    +	if len(c.h) == 0 {
    +		panic("mergeIterator.At() called after .Next() returned false.")
    +	}
    +
    +	return c.h[0].At()
    +}
    +
    +// Next advances past the current timestamp, popping and re-pushing every
    +// input that sits on it so duplicates are consumed together.
    +func (c *mergeIterator) Next() bool {
    +	// A nil heap means neither Next nor Seek has run yet; initialize
    +	// lazily from the inputs' first samples.
    +	if c.h == nil {
    +		for _, iter := range c.iterators {
    +			if iter.Next() {
    +				heap.Push(&c.h, iter)
    +			}
    +		}
    +
    +		return len(c.h) > 0
    +	}
    +
    +	if len(c.h) == 0 {
    +		return false
    +	}
    +
    +	currt, _ := c.At()
    +	for len(c.h) > 0 {
    +		nextt, _ := c.h[0].At()
    +		if nextt != currt {
    +			break
    +		}
    +
    +		iter := heap.Pop(&c.h).(SeriesIterator)
    +		if iter.Next() {
    +			heap.Push(&c.h, iter)
    +		}
    +	}
    +
    +	return len(c.h) > 0
    +}
    +
    +// Err returns the first error encountered by any input iterator.
    +func (c *mergeIterator) Err() error {
    +	for _, iter := range c.iterators {
    +		if err := iter.Err(); err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +// seriesIteratorHeap is a min-heap of sample iterators ordered by the
    +// timestamp of the sample each iterator currently points at.
    +type seriesIteratorHeap []SeriesIterator
    +
    +func (h seriesIteratorHeap) Len() int      { return len(h) }
    +func (h seriesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
    +
    +func (h seriesIteratorHeap) Less(i, j int) bool {
    +	at, _ := h[i].At()
    +	bt, _ := h[j].At()
    +	return at < bt
    +}
    +
    +func (h *seriesIteratorHeap) Push(x interface{}) {
    +	*h = append(*h, x.(SeriesIterator))
    +}
    +
    +func (h *seriesIteratorHeap) Pop() interface{} {
    +	old := *h
    +	n := len(old)
    +	x := old[n-1]
    +	*h = old[0 : n-1]
    +	return x
    +}
    diff --git a/src/prometheus/storage/fanout_test.go b/src/prometheus/storage/fanout_test.go
    new file mode 100644
    index 0000000..3328673
    --- /dev/null
    +++ b/src/prometheus/storage/fanout_test.go
    @@ -0,0 +1,294 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package storage
    +
    +import (
    +	"fmt"
    +	"math"
    +	"testing"
    +
    +	"github.com/stretchr/testify/require"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +)
    +
    +// TestMergeStringSlices checks the recursive multi-slice merge on sorted
    +// inputs.
    +func TestMergeStringSlices(t *testing.T) {
    +	for _, tc := range []struct {
    +		input    [][]string
    +		expected []string
    +	}{
    +		{},
    +		{[][]string{{"foo"}}, []string{"foo"}},
    +		{[][]string{{"foo"}, {"bar"}}, []string{"bar", "foo"}},
    +		{[][]string{{"foo"}, {"bar"}, {"baz"}}, []string{"bar", "baz", "foo"}},
    +	} {
    +		require.Equal(t, tc.expected, mergeStringSlices(tc.input))
    +	}
    +}
    +
    +// TestMergeTwoStringSlices checks the pairwise sorted merge, including
    +// deduplication of common values.
    +func TestMergeTwoStringSlices(t *testing.T) {
    +	for _, tc := range []struct {
    +		a, b, expected []string
    +	}{
    +		{[]string{}, []string{}, []string{}},
    +		{[]string{"foo"}, nil, []string{"foo"}},
    +		{nil, []string{"bar"}, []string{"bar"}},
    +		{[]string{"foo"}, []string{"bar"}, []string{"bar", "foo"}},
    +		{[]string{"foo"}, []string{"bar", "baz"}, []string{"bar", "baz", "foo"}},
    +		{[]string{"foo"}, []string{"foo"}, []string{"foo"}},
    +	} {
    +		require.Equal(t, tc.expected, mergeTwoStringSlices(tc.a, tc.b))
    +	}
    +}
    +
    +// TestMergeSeriesSet checks that merged series sets deduplicate series by
    +// label set and interleave their samples.
    +func TestMergeSeriesSet(t *testing.T) {
    +	for _, tc := range []struct {
    +		input    []SeriesSet
    +		expected SeriesSet
    +	}{
    +		{
    +			input:    []SeriesSet{newMockSeriesSet()},
    +			expected: newMockSeriesSet(),
    +		},
    +
    +		{
    +			input: []SeriesSet{newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("bar", "baz"), []sample{{1, 1}, {2, 2}}),
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, 0}, {1, 1}}),
    +			)},
    +			expected: newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("bar", "baz"), []sample{{1, 1}, {2, 2}}),
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, 0}, {1, 1}}),
    +			),
    +		},
    +
    +		{
    +			input: []SeriesSet{newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, 0}, {1, 1}}),
    +			), newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("bar", "baz"), []sample{{1, 1}, {2, 2}}),
    +			)},
    +			expected: newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("bar", "baz"), []sample{{1, 1}, {2, 2}}),
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, 0}, {1, 1}}),
    +			),
    +		},
    +
    +		{
    +			input: []SeriesSet{newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("bar", "baz"), []sample{{1, 1}, {2, 2}}),
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, 0}, {1, 1}}),
    +			), newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("bar", "baz"), []sample{{3, 3}, {4, 4}}),
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{2, 2}, {3, 3}}),
    +			)},
    +			expected: newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("bar", "baz"), []sample{{1, 1}, {2, 2}, {3, 3}, {4, 4}}),
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, 0}, {1, 1}, {2, 2}, {3, 3}}),
    +			),
    +		},
    +		{
    +			input: []SeriesSet{newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, math.NaN()}}),
    +			), newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, math.NaN()}}),
    +			)},
    +			expected: newMockSeriesSet(
    +				newMockSeries(labels.FromStrings("foo", "bar"), []sample{{0, math.NaN()}}),
    +			),
    +		},
    +	} {
    +		merged := NewMergeSeriesSet(tc.input)
    +		for merged.Next() {
    +			require.True(t, tc.expected.Next())
    +			actualSeries := merged.At()
    +			expectedSeries := tc.expected.At()
    +			require.Equal(t, expectedSeries.Labels(), actualSeries.Labels())
    +			require.Equal(t, drainSamples(expectedSeries.Iterator()), drainSamples(actualSeries.Iterator()))
    +		}
    +		// Both sides must be exhausted at the same point.
    +		require.False(t, tc.expected.Next())
    +	}
    +}
    +
    +// TestMergeIterator checks that the k-way sample merge yields a sorted,
    +// timestamp-deduplicated stream.
    +func TestMergeIterator(t *testing.T) {
    +	for _, tc := range []struct {
    +		input    []SeriesIterator
    +		expected []sample
    +	}{
    +		{
    +			input: []SeriesIterator{
    +				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
    +			},
    +			expected: []sample{{0, 0}, {1, 1}},
    +		},
    +		{
    +			input: []SeriesIterator{
    +				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
    +				newListSeriesIterator([]sample{{2, 2}, {3, 3}}),
    +			},
    +			expected: []sample{{0, 0}, {1, 1}, {2, 2}, {3, 3}},
    +		},
    +		{
    +			input: []SeriesIterator{
    +				newListSeriesIterator([]sample{{0, 0}, {3, 3}}),
    +				newListSeriesIterator([]sample{{1, 1}, {4, 4}}),
    +				newListSeriesIterator([]sample{{2, 2}, {5, 5}}),
    +			},
    +			expected: []sample{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}},
    +		},
    +		{
    +			// Overlapping timestamps must be emitted only once.
    +			input: []SeriesIterator{
    +				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
    +				newListSeriesIterator([]sample{{0, 0}, {2, 2}}),
    +				newListSeriesIterator([]sample{{2, 2}, {3, 3}}),
    +			},
    +			expected: []sample{{0, 0}, {1, 1}, {2, 2}, {3, 3}},
    +		},
    +	} {
    +		merged := newMergeIterator(tc.input)
    +		actual := drainSamples(merged)
    +		require.Equal(t, tc.expected, actual)
    +	}
    +}
    +
    +func TestMergeIteratorSeek(t *testing.T) {
    +	for _, tc := range []struct {
    +		input    []SeriesIterator
    +		seek     int64
    +		expected []sample
    +	}{
    +		{
    +			input: []SeriesIterator{
    +				newListSeriesIterator([]sample{{0, 0}, {1, 1}, {2, 2}}),
    +			},
    +			seek:     1,
    +			expected: []sample{{1, 1}, {2, 2}},
    +		},
    +		{
    +			input: []SeriesIterator{
    +				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
    +				newListSeriesIterator([]sample{{2, 2}, {3, 3}}),
    +			},
    +			seek:     2,
    +			expected: []sample{{2, 2}, {3, 3}},
    +		},
    +		{
    +			input: []SeriesIterator{
    +				newListSeriesIterator([]sample{{0, 0}, {3, 3}}),
    +				newListSeriesIterator([]sample{{1, 1}, {4, 4}}),
    +				newListSeriesIterator([]sample{{2, 2}, {5, 5}}),
    +			},
    +			seek:     2,
    +			expected: []sample{{2, 2}, {3, 3}, {4, 4}, {5, 5}},
    +		},
    +	} {
    +		merged := newMergeIterator(tc.input)
    +		actual := []sample{}
    +		if merged.Seek(tc.seek) {
    +			t, v := merged.At()
    +			actual = append(actual, sample{t, v})
    +		}
    +		actual = append(actual, drainSamples(merged)...)
    +		require.Equal(t, tc.expected, actual)
    +	}
    +}
    +
+// drainSamples exhausts iter and returns all remaining samples in order.
+// NaN values are replaced with the sentinel -42 so the resulting slices can
+// be compared with require.Equal (NaN never compares equal to itself).
+func drainSamples(iter SeriesIterator) []sample {
+	result := []sample{}
+	for iter.Next() {
+		t, v := iter.At()
+		// NaNs can't be compared normally, so substitute for another value.
+		if math.IsNaN(v) {
+			v = -42
+		}
+		result = append(result, sample{t, v})
+	}
+	return result
+}
    +
+// mockSeriesSet is an in-memory SeriesSet over a fixed slice of Series,
+// used as a test fixture.
+type mockSeriesSet struct {
+	idx    int // Index of the current series; -1 before the first Next call.
+	series []Series
+}
+
+// newMockSeriesSet returns a SeriesSet that iterates over the given series.
+func newMockSeriesSet(series ...Series) SeriesSet {
+	return &mockSeriesSet{
+		idx:    -1,
+		series: series,
+	}
+}
+
+// Next advances to the next series, reporting whether one exists.
+func (m *mockSeriesSet) Next() bool {
+	m.idx++
+	return m.idx < len(m.series)
+}
+
+// At returns the current series; only valid after a successful Next.
+func (m *mockSeriesSet) At() Series {
+	return m.series[m.idx]
+}
+
+// Err always returns nil; the mock cannot fail.
+func (m *mockSeriesSet) Err() error {
+	return nil
+}
    +
+// result is a package-level sink for benchmark output, preventing the
+// compiler from optimizing the benchmarked work away.
+var result []sample
    +
    +func makeSeriesSet(numSeries, numSamples int) SeriesSet {
    +	series := []Series{}
    +	for j := 0; j < numSeries; j++ {
    +		labels := labels.Labels{{Name: "foo", Value: fmt.Sprintf("bar%d", j)}}
    +		samples := []sample{}
    +		for k := 0; k < numSamples; k++ {
    +			samples = append(samples, sample{t: int64(k), v: float64(k)})
    +		}
    +		series = append(series, newMockSeries(labels, samples))
    +	}
    +	return newMockSeriesSet(series...)
    +}
    +
+// makeMergeSeriesSet builds a merged SeriesSet out of numSeriesSets
+// identical in-memory series sets, for use in benchmarks.
+func makeMergeSeriesSet(numSeriesSets, numSeries, numSamples int) SeriesSet {
+	seriesSets := []SeriesSet{}
+	for i := 0; i < numSeriesSets; i++ {
+		seriesSets = append(seriesSets, makeSeriesSet(numSeries, numSamples))
+	}
+	return NewMergeSeriesSet(seriesSets)
+}
+
+// benchmarkDrain measures draining every series of seriesSet, writing the
+// samples into the package-level sink to defeat dead-code elimination.
+// NOTE(review): the same seriesSet is reused across all b.N iterations, so
+// after the first iteration Next() is exhausted and later iterations do no
+// work — confirm whether the set should be rebuilt per iteration.
+func benchmarkDrain(seriesSet SeriesSet, b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		for seriesSet.Next() {
+			result = drainSamples(seriesSet.At().Iterator())
+		}
+	}
+}
+
+// BenchmarkNoMergeSeriesSet_100_100 is the no-merge baseline: draining a
+// plain set of 100 series with 100 samples each.
+func BenchmarkNoMergeSeriesSet_100_100(b *testing.B) {
+	seriesSet := makeSeriesSet(100, 100)
+	benchmarkDrain(seriesSet, b)
+}
+
+// BenchmarkMergeSeriesSet benchmarks draining merged series sets built from
+// 1, 10 and 100 constituent sets.
+func BenchmarkMergeSeriesSet(b *testing.B) {
+	for _, bm := range []struct {
+		numSeriesSets, numSeries, numSamples int
+	}{
+		{1, 100, 100},
+		{10, 100, 100},
+		{100, 100, 100},
+	} {
+		seriesSet := makeMergeSeriesSet(bm.numSeriesSets, bm.numSeries, bm.numSamples)
+		b.Run(fmt.Sprintf("%d_%d_%d", bm.numSeriesSets, bm.numSeries, bm.numSamples), func(b *testing.B) {
+			benchmarkDrain(seriesSet, b)
+		})
+	}
+}
    diff --git a/src/prometheus/storage/interface.go b/src/prometheus/storage/interface.go
    new file mode 100644
    index 0000000..9c145c4
    --- /dev/null
    +++ b/src/prometheus/storage/interface.go
    @@ -0,0 +1,118 @@
    +// Copyright 2014 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package storage
    +
    +import (
    +	"context"
    +	"errors"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +)
    +
+// The errors exposed by the storage layer.
+var (
+	// ErrNotFound is returned when the requested item is not present.
+	ErrNotFound                    = errors.New("not found")
+	// ErrOutOfOrderSample is returned for a sample that is out of order.
+	ErrOutOfOrderSample            = errors.New("out of order sample")
+	// ErrDuplicateSampleForTimestamp is returned when a different sample
+	// already exists for the same timestamp.
+	ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp")
+	// ErrOutOfBounds is returned for a sample outside the accepted bounds.
+	ErrOutOfBounds                 = errors.New("out of bounds")
+)
+
+// Storage ingests and manages samples, along with various indexes. All methods
+// are goroutine-safe. Storage implements storage.SampleAppender.
+// NOTE(review): the SampleAppender reference may be stale — confirm.
+type Storage interface {
+	Queryable
+
+	// StartTime returns the oldest timestamp stored in the storage.
+	StartTime() (int64, error)
+
+	// Appender returns a new appender against the storage.
+	Appender() (Appender, error)
+
+	// Close closes the storage and all its underlying resources.
+	Close() error
+}
+
+// A Queryable handles queries against a storage.
+type Queryable interface {
+	// Querier returns a new Querier on the storage covering [mint, maxt].
+	Querier(ctx context.Context, mint, maxt int64) (Querier, error)
+}
+
+// Querier provides reading access to time series data.
+type Querier interface {
+	// Select returns a set of series that matches the given label matchers.
+	Select(*SelectParams, ...*labels.Matcher) (SeriesSet, error)
+
+	// LabelValues returns all potential values for a label name.
+	LabelValues(name string) ([]string, error)
+
+	// Close releases the resources of the Querier.
+	Close() error
+}
+
+// SelectParams specifies parameters passed to data selections.
+type SelectParams struct {
+	Step int64  // Query step size in milliseconds.
+	Func string // String representation of surrounding function or aggregation.
+}
+
+// QueryableFunc is an adapter to allow the use of ordinary functions as
+// Queryables. It follows the idea of http.HandlerFunc.
+type QueryableFunc func(ctx context.Context, mint, maxt int64) (Querier, error)
+
+// Querier calls f() with the given parameters.
+func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
+	return f(ctx, mint, maxt)
+}
    +
+// Appender provides batched appends against a storage.
+type Appender interface {
+	// Add appends a sample for the given label set and returns a uint64
+	// reference — presumably usable with AddFast for later appends to the
+	// same series; confirm against the implementations.
+	Add(l labels.Labels, t int64, v float64) (uint64, error)
+
+	// AddFast appends a sample using a reference previously returned by Add.
+	AddFast(l labels.Labels, ref uint64, t int64, v float64) error
+
+	// Commit submits the collected samples and purges the batch.
+	Commit() error
+
+	// Rollback discards the samples collected so far without committing.
+	Rollback() error
+}
+
+// SeriesSet contains a set of series.
+type SeriesSet interface {
+	// Next advances to the next series, reporting whether one exists.
+	Next() bool
+	// At returns the current series; only valid after a successful Next.
+	At() Series
+	// Err returns any error encountered during iteration.
+	Err() error
+}
+
+// Series represents a single time series.
+type Series interface {
+	// Labels returns the complete set of labels identifying the series.
+	Labels() labels.Labels
+
+	// Iterator returns a new iterator of the data of the series.
+	Iterator() SeriesIterator
+}
+
+// SeriesIterator iterates over the data of a time series.
+type SeriesIterator interface {
+	// Seek advances the iterator forward to the value at or after
+	// the given timestamp.
+	Seek(t int64) bool
+	// At returns the current timestamp/value pair.
+	At() (t int64, v float64)
+	// Next advances the iterator by one.
+	Next() bool
+	// Err returns the current error.
+	Err() error
+}
    diff --git a/src/prometheus/storage/noop.go b/src/prometheus/storage/noop.go
    new file mode 100644
    index 0000000..fa02443
    --- /dev/null
    +++ b/src/prometheus/storage/noop.go
    @@ -0,0 +1,54 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package storage
    +
    +import "github.com/prometheus/prometheus/pkg/labels"
    +
+// noopQuerier is a Querier that returns empty results for every call.
+type noopQuerier struct{}
+
+// NoopQuerier is a Querier that does nothing.
+func NoopQuerier() Querier {
+	return noopQuerier{}
+}
+
+// Select returns an empty SeriesSet and never errors.
+func (noopQuerier) Select(*SelectParams, ...*labels.Matcher) (SeriesSet, error) {
+	return NoopSeriesSet(), nil
+}
+
+// LabelValues returns no values and never errors.
+func (noopQuerier) LabelValues(name string) ([]string, error) {
+	return nil, nil
+}
+
+// Close is a no-op; there are no resources to release.
+func (noopQuerier) Close() error {
+	return nil
+}
+
+// noopSeriesSet is an always-empty SeriesSet.
+type noopSeriesSet struct{}
+
+// NoopSeriesSet is a SeriesSet that does nothing.
+func NoopSeriesSet() SeriesSet {
+	return noopSeriesSet{}
+}
+
+// Next always reports false: the set is empty.
+func (noopSeriesSet) Next() bool {
+	return false
+}
+
+// At returns nil; since Next never succeeds, it should never be called.
+func (noopSeriesSet) At() Series {
+	return nil
+}
+
+// Err always returns nil.
+func (noopSeriesSet) Err() error {
+	return nil
+}
    diff --git a/src/prometheus/storage/remote/client.go b/src/prometheus/storage/remote/client.go
    new file mode 100644
    index 0000000..ae42e99
    --- /dev/null
    +++ b/src/prometheus/storage/remote/client.go
    @@ -0,0 +1,176 @@
    +// Copyright 2016 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"bufio"
    +	"bytes"
    +	"context"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"time"
    +
    +	"github.com/gogo/protobuf/proto"
    +	"github.com/golang/snappy"
    +	"github.com/prometheus/common/model"
    +	"golang.org/x/net/context/ctxhttp"
    +
    +	config_util "github.com/prometheus/common/config"
    +	"github.com/prometheus/prometheus/prompb"
    +)
    +
+// maxErrMsgLen caps how much of an error response body is read back and
+// embedded in the error returned by Store.
+const maxErrMsgLen = 256
+
+// Client allows reading and writing from/to a remote HTTP endpoint.
+type Client struct {
+	index   int // Used to differentiate clients in metrics.
+	url     *config_util.URL // Endpoint the client reads from / writes to.
+	client  *http.Client // Built from HTTPClientConfig in NewClient.
+	timeout time.Duration // Per-request deadline.
+}
+
+// ClientConfig configures a Client.
+type ClientConfig struct {
+	URL              *config_util.URL // Remote endpoint URL.
+	Timeout          model.Duration   // Per-request timeout.
+	HTTPClientConfig config_util.HTTPClientConfig // TLS/auth/proxy settings.
+}
+
+// NewClient creates a new Client.
+func NewClient(index int, conf *ClientConfig) (*Client, error) {
+	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage")
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		index:   index,
+		url:     conf.URL,
+		client:  httpClient,
+		timeout: time.Duration(conf.Timeout),
+	}, nil
+}
+
+// recoverableError wraps an error considered transient (network failure or
+// 5xx response); callers may retry the operation.
+type recoverableError struct {
+	error
+}
    +
    +// Store sends a batch of samples to the HTTP endpoint.
    +func (c *Client) Store(ctx context.Context, req *prompb.WriteRequest) error {
    +	data, err := proto.Marshal(req)
    +	if err != nil {
    +		return err
    +	}
    +
    +	compressed := snappy.Encode(nil, data)
    +	httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed))
    +	if err != nil {
    +		// Errors from NewRequest are from unparseable URLs, so are not
    +		// recoverable.
    +		return err
    +	}
    +	httpReq.Header.Add("Content-Encoding", "snappy")
    +	httpReq.Header.Set("Content-Type", "application/x-protobuf")
    +	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
    +	httpReq = httpReq.WithContext(ctx)
    +
    +	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
    +	defer cancel()
    +
    +	httpResp, err := ctxhttp.Do(ctx, c.client, httpReq)
    +	if err != nil {
    +		// Errors from client.Do are from (for example) network errors, so are
    +		// recoverable.
    +		return recoverableError{err}
    +	}
    +	defer httpResp.Body.Close()
    +
    +	if httpResp.StatusCode/100 != 2 {
    +		scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
    +		line := ""
    +		if scanner.Scan() {
    +			line = scanner.Text()
    +		}
    +		err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
    +	}
    +	if httpResp.StatusCode/100 == 5 {
    +		return recoverableError{err}
    +	}
    +	return err
    +}
    +
+// Name identifies the client.
+// The result combines the client's index with its URL so per-client
+// metrics can be told apart.
+func (c Client) Name() string {
+	return fmt.Sprintf("%d:%s", c.index, c.url)
+}
+
+// Read reads from a remote endpoint.
+// The query is sent as a snappy-compressed protobuf ReadRequest; exactly
+// one result is expected back since only a single query is sent.
+func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
+	req := &prompb.ReadRequest{
+		// TODO: Support batching multiple queries into one read request,
+		// as the protobuf interface allows for it.
+		Queries: []*prompb.Query{
+			query,
+		},
+	}
+	data, err := proto.Marshal(req)
+	if err != nil {
+		return nil, fmt.Errorf("unable to marshal read request: %v", err)
+	}
+
+	compressed := snappy.Encode(nil, data)
+	httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed))
+	if err != nil {
+		return nil, fmt.Errorf("unable to create request: %v", err)
+	}
+	httpReq.Header.Add("Content-Encoding", "snappy")
+	httpReq.Header.Set("Content-Type", "application/x-protobuf")
+	httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")
+
+	// Deadline derives from the caller's context, so caller cancellation is
+	// honoured in addition to the configured timeout.
+	ctx, cancel := context.WithTimeout(ctx, c.timeout)
+	defer cancel()
+
+	httpResp, err := ctxhttp.Do(ctx, c.client, httpReq)
+	if err != nil {
+		return nil, fmt.Errorf("error sending request: %v", err)
+	}
+	defer httpResp.Body.Close()
+	if httpResp.StatusCode/100 != 2 {
+		return nil, fmt.Errorf("server returned HTTP status %s", httpResp.Status)
+	}
+
+	compressed, err = ioutil.ReadAll(httpResp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("error reading response: %v", err)
+	}
+
+	uncompressed, err := snappy.Decode(nil, compressed)
+	if err != nil {
+		return nil, fmt.Errorf("error reading response: %v", err)
+	}
+
+	var resp prompb.ReadResponse
+	err = proto.Unmarshal(uncompressed, &resp)
+	if err != nil {
+		return nil, fmt.Errorf("unable to unmarshal response body: %v", err)
+	}
+
+	// Sanity-check that the server answered the one query we sent.
+	if len(resp.Results) != len(req.Queries) {
+		return nil, fmt.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results))
+	}
+
+	return resp.Results[0], nil
+}
    diff --git a/src/prometheus/storage/remote/client_test.go b/src/prometheus/storage/remote/client_test.go
    new file mode 100644
    index 0000000..73ec875
    --- /dev/null
    +++ b/src/prometheus/storage/remote/client_test.go
    @@ -0,0 +1,84 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"context"
    +	"fmt"
    +	"net/http"
    +	"net/http/httptest"
    +	"net/url"
    +	"reflect"
    +	"strings"
    +	"testing"
    +	"time"
    +
    +	config_util "github.com/prometheus/common/config"
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/prompb"
    +)
    +
+// longErrMessage is much longer than maxErrMsgLen, so tests can verify that
+// error response bodies are truncated in the returned error text.
+var longErrMessage = strings.Repeat("error message", maxErrMsgLen)
+
+// TestStoreHTTPErrorHandling verifies how Client.Store maps HTTP status
+// codes to errors: 2xx succeeds, other statuses yield an error embedding
+// the (truncated) response body, and 5xx is wrapped as a recoverableError.
+func TestStoreHTTPErrorHandling(t *testing.T) {
+	tests := []struct {
+		code int
+		err  error
+	}{
+		{
+			code: 200,
+			err:  nil,
+		},
+		{
+			code: 300,
+			err:  fmt.Errorf("server returned HTTP status 300 Multiple Choices: " + longErrMessage[:maxErrMsgLen]),
+		},
+		{
+			code: 404,
+			err:  fmt.Errorf("server returned HTTP status 404 Not Found: " + longErrMessage[:maxErrMsgLen]),
+		},
+		{
+			code: 500,
+			err:  recoverableError{fmt.Errorf("server returned HTTP status 500 Internal Server Error: " + longErrMessage[:maxErrMsgLen])},
+		},
+	}
+
+	for i, test := range tests {
+		// NOTE(review): a t.Fatal below leaks this server, since Close is
+		// only reached at the end of the iteration — consider subtests with
+		// defer server.Close().
+		server := httptest.NewServer(
+			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				http.Error(w, longErrMessage, test.code)
+			}),
+		)
+
+		serverURL, err := url.Parse(server.URL)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		c, err := NewClient(0, &ClientConfig{
+			URL:     &config_util.URL{URL: serverURL},
+			Timeout: model.Duration(time.Second),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = c.Store(context.Background(), &prompb.WriteRequest{})
+		if !reflect.DeepEqual(err, test.err) {
+			t.Errorf("%d. Unexpected error; want %v, got %v", i, test.err, err)
+		}
+
+		server.Close()
+	}
+}
    diff --git a/src/prometheus/storage/remote/codec.go b/src/prometheus/storage/remote/codec.go
    new file mode 100644
    index 0000000..3063b64
    --- /dev/null
    +++ b/src/prometheus/storage/remote/codec.go
    @@ -0,0 +1,383 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"fmt"
    +	"io/ioutil"
    +	"net/http"
    +	"sort"
    +
    +	"github.com/gogo/protobuf/proto"
    +	"github.com/golang/snappy"
    +	"github.com/prometheus/common/model"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/prompb"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
+// DecodeReadRequest reads a remote.Request from a http.Request.
+// The body is expected to be a snappy-compressed protobuf message.
+func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) {
+	compressed, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	reqBuf, err := snappy.Decode(nil, compressed)
+	if err != nil {
+		return nil, err
+	}
+
+	var req prompb.ReadRequest
+	if err := proto.Unmarshal(reqBuf, &req); err != nil {
+		return nil, err
+	}
+
+	return &req, nil
+}
+
+// EncodeReadResponse writes a remote.Response to a http.ResponseWriter
+// as a snappy-compressed protobuf message.
+func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error {
+	data, err := proto.Marshal(resp)
+	if err != nil {
+		return err
+	}
+
+	w.Header().Set("Content-Type", "application/x-protobuf")
+	w.Header().Set("Content-Encoding", "snappy")
+
+	compressed := snappy.Encode(nil, data)
+	_, err = w.Write(compressed)
+	return err
+}
    +
+// ToWriteRequest converts an array of samples into a WriteRequest proto.
+// Each sample becomes its own single-sample TimeSeries entry.
+func ToWriteRequest(samples []*model.Sample) *prompb.WriteRequest {
+	req := &prompb.WriteRequest{
+		Timeseries: make([]*prompb.TimeSeries, 0, len(samples)),
+	}
+
+	for _, s := range samples {
+		ts := prompb.TimeSeries{
+			Labels: MetricToLabelProtos(s.Metric),
+			Samples: []*prompb.Sample{
+				{
+					Value:     float64(s.Value),
+					Timestamp: int64(s.Timestamp),
+				},
+			},
+		}
+		req.Timeseries = append(req.Timeseries, &ts)
+	}
+
+	return req
+}
+
+// ToQuery builds a Query proto for the given time range, matchers and
+// optional read hints.
+func ToQuery(from, to int64, matchers []*labels.Matcher, p *storage.SelectParams) (*prompb.Query, error) {
+	ms, err := toLabelMatchers(matchers)
+	if err != nil {
+		return nil, err
+	}
+
+	// Hints are optional; a nil SelectParams yields a query without them.
+	var rp *prompb.ReadHints
+	if p != nil {
+		rp = &prompb.ReadHints{
+			StepMs: p.Step,
+			Func:   p.Func,
+		}
+	}
+
+	return &prompb.Query{
+		StartTimestampMs: from,
+		EndTimestampMs:   to,
+		Matchers:         ms,
+		Hints:            rp,
+	}, nil
+}
+
+// FromQuery unpacks a Query proto into its time range and matchers.
+// Note that read hints are not unpacked here.
+func FromQuery(req *prompb.Query) (int64, int64, []*labels.Matcher, error) {
+	matchers, err := fromLabelMatchers(req.Matchers)
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	return req.StartTimestampMs, req.EndTimestampMs, matchers, nil
+}
    +
+// ToQueryResult builds a QueryResult proto.
+// It drains every series of ss and copies all samples; any iterator or
+// series-set error aborts the conversion.
+func ToQueryResult(ss storage.SeriesSet) (*prompb.QueryResult, error) {
+	resp := &prompb.QueryResult{}
+	for ss.Next() {
+		series := ss.At()
+		iter := series.Iterator()
+		samples := []*prompb.Sample{}
+
+		for iter.Next() {
+			ts, val := iter.At()
+			samples = append(samples, &prompb.Sample{
+				Timestamp: ts,
+				Value:     val,
+			})
+		}
+		if err := iter.Err(); err != nil {
+			return nil, err
+		}
+
+		resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
+			Labels:  labelsToLabelsProto(series.Labels()),
+			Samples: samples,
+		})
+	}
+	if err := ss.Err(); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
    +
    +// FromQueryResult unpacks a QueryResult proto.
    +func FromQueryResult(res *prompb.QueryResult) storage.SeriesSet {
    +	series := make([]storage.Series, 0, len(res.Timeseries))
    +	for _, ts := range res.Timeseries {
    +		labels := labelProtosToLabels(ts.Labels)
    +		if err := validateLabelsAndMetricName(labels); err != nil {
    +			return errSeriesSet{err: err}
    +		}
    +
    +		series = append(series, &concreteSeries{
    +			labels:  labels,
    +			samples: ts.Samples,
    +		})
    +	}
    +	sort.Sort(byLabel(series))
    +	return &concreteSeriesSet{
    +		series: series,
    +	}
    +}
    +
+// byLabel sorts a []storage.Series by label set.
+type byLabel []storage.Series
+
+func (a byLabel) Len() int           { return len(a) }
+func (a byLabel) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
+
+// errSeriesSet implements storage.SeriesSet, just returning an error.
+type errSeriesSet struct {
+	err error
+}
+
+// Next always reports false; the set carries only an error.
+func (errSeriesSet) Next() bool {
+	return false
+}
+
+// At returns nil; since Next never succeeds, it should never be called.
+func (errSeriesSet) At() storage.Series {
+	return nil
+}
+
+// Err returns the wrapped error.
+func (e errSeriesSet) Err() error {
+	return e.err
+}
+
+// concreteSeriesSet implements storage.SeriesSet over an in-memory slice.
+type concreteSeriesSet struct {
+	cur    int // One-based position: series[cur-1] is the current element.
+	series []storage.Series
+}
+
+// Next advances to the next series, reporting whether one exists.
+func (c *concreteSeriesSet) Next() bool {
+	c.cur++
+	return c.cur-1 < len(c.series)
+}
+
+// At returns the current series; only valid after a successful Next.
+func (c *concreteSeriesSet) At() storage.Series {
+	return c.series[c.cur-1]
+}
+
+// Err always returns nil; iterating an in-memory slice cannot fail.
+func (c *concreteSeriesSet) Err() error {
+	return nil
+}
    +
    +// concreteSeries implements storage.Series.
    +type concreteSeries struct {
    +	labels  labels.Labels
    +	samples []*prompb.Sample
    +}
    +
    +func (c *concreteSeries) Labels() labels.Labels {
    +	return labels.New(c.labels...)
    +}
    +
    +func (c *concreteSeries) Iterator() storage.SeriesIterator {
    +	return newConcreteSeriersIterator(c)
    +}
    +
    +// concreteSeriesIterator implements storage.SeriesIterator.
    +type concreteSeriesIterator struct {
    +	cur    int
    +	series *concreteSeries
    +}
    +
    +func newConcreteSeriersIterator(series *concreteSeries) storage.SeriesIterator {
    +	return &concreteSeriesIterator{
    +		cur:    -1,
    +		series: series,
    +	}
    +}
    +
    +// Seek implements storage.SeriesIterator.
    +func (c *concreteSeriesIterator) Seek(t int64) bool {
    +	c.cur = sort.Search(len(c.series.samples), func(n int) bool {
    +		return c.series.samples[n].Timestamp >= t
    +	})
    +	return c.cur < len(c.series.samples)
    +}
    +
    +// At implements storage.SeriesIterator.
    +func (c *concreteSeriesIterator) At() (t int64, v float64) {
    +	s := c.series.samples[c.cur]
    +	return s.Timestamp, s.Value
    +}
    +
    +// Next implements storage.SeriesIterator.
    +func (c *concreteSeriesIterator) Next() bool {
    +	c.cur++
    +	return c.cur < len(c.series.samples)
    +}
    +
    +// Err implements storage.SeriesIterator.
    +func (c *concreteSeriesIterator) Err() error {
    +	return nil
    +}
    +
+// validateLabelsAndMetricName validates the label names/values and metric names returned from remote read.
+// NOTE(review): the error strings are capitalized, against Go convention,
+// but the codec tests compare the exact text — change both together if ever.
+func validateLabelsAndMetricName(ls labels.Labels) error {
+	for _, l := range ls {
+		// The __name__ label must additionally be a valid metric name.
+		if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
+			return fmt.Errorf("Invalid metric name: %v", l.Value)
+		}
+		if !model.LabelName(l.Name).IsValid() {
+			return fmt.Errorf("Invalid label name: %v", l.Name)
+		}
+		if !model.LabelValue(l.Value).IsValid() {
+			return fmt.Errorf("Invalid label value: %v", l.Value)
+		}
+	}
+	return nil
+}
+
+// toLabelMatchers converts matchers into their protobuf representation,
+// erroring on unknown matcher types.
+func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) {
+	pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers))
+	for _, m := range matchers {
+		var mType prompb.LabelMatcher_Type
+		switch m.Type {
+		case labels.MatchEqual:
+			mType = prompb.LabelMatcher_EQ
+		case labels.MatchNotEqual:
+			mType = prompb.LabelMatcher_NEQ
+		case labels.MatchRegexp:
+			mType = prompb.LabelMatcher_RE
+		case labels.MatchNotRegexp:
+			mType = prompb.LabelMatcher_NRE
+		default:
+			return nil, fmt.Errorf("invalid matcher type")
+		}
+		pbMatchers = append(pbMatchers, &prompb.LabelMatcher{
+			Type:  mType,
+			Name:  m.Name,
+			Value: m.Value,
+		})
+	}
+	return pbMatchers, nil
+}
    +
    +func fromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) {
    +	result := make([]*labels.Matcher, 0, len(matchers))
    +	for _, matcher := range matchers {
    +		var mtype labels.MatchType
    +		switch matcher.Type {
    +		case prompb.LabelMatcher_EQ:
    +			mtype = labels.MatchEqual
    +		case prompb.LabelMatcher_NEQ:
    +			mtype = labels.MatchNotEqual
    +		case prompb.LabelMatcher_RE:
    +			mtype = labels.MatchRegexp
    +		case prompb.LabelMatcher_NRE:
    +			mtype = labels.MatchNotRegexp
    +		default:
    +			return nil, fmt.Errorf("invalid matcher type")
    +		}
    +		matcher, err := labels.NewMatcher(mtype, matcher.Name, matcher.Value)
    +		if err != nil {
    +			return nil, err
    +		}
    +		result = append(result, matcher)
    +	}
    +	return result, nil
    +}
    +
    +// MetricToLabelProtos builds a []*prompb.Label from a model.Metric
    +func MetricToLabelProtos(metric model.Metric) []*prompb.Label {
    +	labels := make([]*prompb.Label, 0, len(metric))
    +	for k, v := range metric {
    +		labels = append(labels, &prompb.Label{
    +			Name:  string(k),
    +			Value: string(v),
    +		})
    +	}
    +	sort.Slice(labels, func(i int, j int) bool {
    +		return labels[i].Name < labels[j].Name
    +	})
    +	return labels
    +}
    +
    +// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
    +func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
    +	metric := make(model.Metric, len(labelPairs))
    +	for _, l := range labelPairs {
    +		metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
    +	}
    +	return metric
    +}
    +
    +func labelProtosToLabels(labelPairs []*prompb.Label) labels.Labels {
    +	result := make(labels.Labels, 0, len(labelPairs))
    +	for _, l := range labelPairs {
    +		result = append(result, labels.Label{
    +			Name:  l.Name,
    +			Value: l.Value,
    +		})
    +	}
    +	sort.Sort(result)
    +	return result
    +}
    +
    +func labelsToLabelsProto(labels labels.Labels) []*prompb.Label {
    +	result := make([]*prompb.Label, 0, len(labels))
    +	for _, l := range labels {
    +		result = append(result, &prompb.Label{
    +			Name:  l.Name,
    +			Value: l.Value,
    +		})
    +	}
    +	return result
    +}
    +
+// labelsToMetric converts a label set into the equivalent model.Metric.
+func labelsToMetric(ls labels.Labels) model.Metric {
+	metric := make(model.Metric, len(ls))
+	for _, l := range ls {
+		metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
+	}
+	return metric
+}
    diff --git a/src/prometheus/storage/remote/codec_test.go b/src/prometheus/storage/remote/codec_test.go
    new file mode 100644
    index 0000000..c6b9010
    --- /dev/null
    +++ b/src/prometheus/storage/remote/codec_test.go
    @@ -0,0 +1,147 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"testing"
    +
    +	"github.com/stretchr/testify/require"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/prompb"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
+// TestValidateLabelsAndMetricName exercises validateLabelsAndMetricName with
+// valid and invalid metric names, label names, and label values.
+func TestValidateLabelsAndMetricName(t *testing.T) {
+	tests := []struct {
+		input       labels.Labels
+		expectedErr string
+		shouldPass  bool
+	}{
+		{
+			input: labels.FromStrings(
+				"__name__", "name",
+				"labelName", "labelValue",
+			),
+			expectedErr: "",
+			shouldPass:  true,
+		},
+		{
+			input: labels.FromStrings(
+				"__name__", "name",
+				"_labelName", "labelValue",
+			),
+			expectedErr: "",
+			shouldPass:  true,
+		},
+		{
+			input: labels.FromStrings(
+				"__name__", "name",
+				"@labelName", "labelValue",
+			),
+			expectedErr: "Invalid label name: @labelName",
+			shouldPass:  false,
+		},
+		{
+			input: labels.FromStrings(
+				"__name__", "name",
+				"123labelName", "labelValue",
+			),
+			expectedErr: "Invalid label name: 123labelName",
+			shouldPass:  false,
+		},
+		{
+			input: labels.FromStrings(
+				"__name__", "name",
+				"", "labelValue",
+			),
+			expectedErr: "Invalid label name: ",
+			shouldPass:  false,
+		},
+		{
+			// 0xff is not valid UTF-8, so the label value must be rejected.
+			input: labels.FromStrings(
+				"__name__", "name",
+				"labelName", string([]byte{0xff}),
+			),
+			expectedErr: "Invalid label value: " + string([]byte{0xff}),
+			shouldPass:  false,
+		},
+		{
+			input: labels.FromStrings(
+				"__name__", "@invalid_name",
+			),
+			expectedErr: "Invalid metric name: @invalid_name",
+			shouldPass:  false,
+		},
+	}
+
+	for _, test := range tests {
+		err := validateLabelsAndMetricName(test.input)
+		if test.shouldPass != (err == nil) {
+			if test.shouldPass {
+				t.Fatalf("Test should pass, got unexpected error: %v", err)
+			} else {
+				t.Fatalf("Test should fail, unexpected error, got: %v, expected: %v", err, test.expectedErr)
+			}
+		}
+	}
+}
    +
    +func TestConcreteSeriesSet(t *testing.T) {
    +	series1 := &concreteSeries{
    +		labels:  labels.FromStrings("foo", "bar"),
    +		samples: []*prompb.Sample{&prompb.Sample{Value: 1, Timestamp: 2}},
    +	}
    +	series2 := &concreteSeries{
    +		labels:  labels.FromStrings("foo", "baz"),
    +		samples: []*prompb.Sample{&prompb.Sample{Value: 3, Timestamp: 4}},
    +	}
    +	c := &concreteSeriesSet{
    +		series: []storage.Series{series1, series2},
    +	}
    +	if !c.Next() {
    +		t.Fatalf("Expected Next() to be true.")
    +	}
    +	if c.At() != series1 {
    +		t.Fatalf("Unexpected series returned.")
    +	}
    +	if !c.Next() {
    +		t.Fatalf("Expected Next() to be true.")
    +	}
    +	if c.At() != series2 {
    +		t.Fatalf("Unexpected series returned.")
    +	}
    +	if c.Next() {
    +		t.Fatalf("Expected Next() to be false.")
    +	}
    +}
    +
+// TestConcreteSeriesClonesLabels ensures Labels() returns a copy, so a
+// caller mutating the returned slice cannot corrupt the series' own labels.
+func TestConcreteSeriesClonesLabels(t *testing.T) {
+	lbls := labels.Labels{
+		labels.Label{Name: "a", Value: "b"},
+		labels.Label{Name: "c", Value: "d"},
+	}
+	cs := concreteSeries{
+		labels: labels.New(lbls...),
+	}
+
+	gotLabels := cs.Labels()
+	require.Equal(t, lbls, gotLabels)
+
+	// Mutate the returned slice; a second call must still see the originals.
+	gotLabels[0].Value = "foo"
+	gotLabels[1].Value = "bar"
+
+	gotLabels = cs.Labels()
+	require.Equal(t, lbls, gotLabels)
+}
    diff --git a/src/prometheus/storage/remote/ewma.go b/src/prometheus/storage/remote/ewma.go
    new file mode 100644
    index 0000000..82b6dd1
    --- /dev/null
    +++ b/src/prometheus/storage/remote/ewma.go
    @@ -0,0 +1,68 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"sync"
    +	"sync/atomic"
    +	"time"
    +)
    +
+// ewmaRate tracks an exponentially weighted moving average of a per-second rate.
+type ewmaRate struct {
+	// newEvents is accessed atomically; keeping it first in the struct is
+	// what newEWMARate relies on for 64-bit alignment on ARM (prometheus#2666).
+	newEvents int64
+	alpha     float64
+	interval  time.Duration
+	lastRate  float64
+	init      bool
+	mutex     sync.Mutex
+}
    +
    +// newEWMARate always allocates a new ewmaRate, as this guarantees the atomically
    +// accessed int64 will be aligned on ARM.  See prometheus#2666.
    +func newEWMARate(alpha float64, interval time.Duration) *ewmaRate {
    +	return &ewmaRate{
    +		alpha:    alpha,
    +		interval: interval,
    +	}
    +}
    +
    +// rate returns the per-second rate.
    +func (r *ewmaRate) rate() float64 {
    +	r.mutex.Lock()
    +	defer r.mutex.Unlock()
    +	return r.lastRate
    +}
    +
+// tick assumes to be called every r.interval. It consumes the events that
+// arrived since the last tick and folds their instantaneous per-second rate
+// into the moving average.
+func (r *ewmaRate) tick() {
+	// Subtracting the loaded value (rather than storing zero) means events
+	// counted concurrently between the Load and the Add are not lost.
+	newEvents := atomic.LoadInt64(&r.newEvents)
+	atomic.AddInt64(&r.newEvents, -newEvents)
+	instantRate := float64(newEvents) / r.interval.Seconds()
+
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+
+	if r.init {
+		r.lastRate += r.alpha * (instantRate - r.lastRate)
+	} else {
+		// First tick: seed the average with the first observed rate.
+		r.init = true
+		r.lastRate = instantRate
+	}
+}
    +
+// incr adds incr events to the pending event count.
+func (r *ewmaRate) incr(incr int64) {
+	atomic.AddInt64(&r.newEvents, incr)
+}
    diff --git a/src/prometheus/storage/remote/queue_manager.go b/src/prometheus/storage/remote/queue_manager.go
    new file mode 100644
    index 0000000..927601d
    --- /dev/null
    +++ b/src/prometheus/storage/remote/queue_manager.go
    @@ -0,0 +1,542 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"context"
    +	"math"
    +	"sync"
    +	"sync/atomic"
    +	"time"
    +
    +	"golang.org/x/time/rate"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/go-kit/kit/log/level"
    +	"github.com/prometheus/client_golang/prometheus"
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/prompb"
    +	"github.com/prometheus/prometheus/relabel"
    +)
    +
+// String constants for instrumentation.
+const (
+	namespace = "prometheus"
+	subsystem = "remote_storage"
+	queue     = "queue"
+
+	// We track samples in/out and how long pushes take using an Exponentially
+	// Weighted Moving Average.
+	ewmaWeight          = 0.2
+	shardUpdateDuration = 10 * time.Second
+
+	// Allow 30% too many shards before scaling down.
+	shardToleranceFraction = 0.3
+
+	// Limit to 1 log event every 10s
+	logRateLimit = 0.1
+	logBurst     = 10
+)
+
+// Per-queue metrics; the single "queue" label value is the remote client's
+// Name(). All are registered in init below.
+var (
+	succeededSamplesTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "succeeded_samples_total",
+			Help:      "Total number of samples successfully sent to remote storage.",
+		},
+		[]string{queue},
+	)
+	failedSamplesTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "failed_samples_total",
+			Help:      "Total number of samples which failed on send to remote storage.",
+		},
+		[]string{queue},
+	)
+	droppedSamplesTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "dropped_samples_total",
+			Help:      "Total number of samples which were dropped due to the queue being full.",
+		},
+		[]string{queue},
+	)
+	sentBatchDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "sent_batch_duration_seconds",
+			Help:      "Duration of sample batch send calls to the remote storage.",
+			Buckets:   prometheus.DefBuckets,
+		},
+		[]string{queue},
+	)
+	queueLength = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "queue_length",
+			Help:      "The number of processed samples queued to be sent to the remote storage.",
+		},
+		[]string{queue},
+	)
+	queueCapacity = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "queue_capacity",
+			Help:      "The capacity of the queue of samples to be sent to the remote storage.",
+		},
+		[]string{queue},
+	)
+	numShards = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "shards",
+			Help:      "The number of shards used for parallel sending to the remote storage.",
+		},
+		[]string{queue},
+	)
+)
+
+// init registers all queue-manager metrics with the default registerer.
+func init() {
+	prometheus.MustRegister(succeededSamplesTotal)
+	prometheus.MustRegister(failedSamplesTotal)
+	prometheus.MustRegister(droppedSamplesTotal)
+	prometheus.MustRegister(sentBatchDuration)
+	prometheus.MustRegister(queueLength)
+	prometheus.MustRegister(queueCapacity)
+	prometheus.MustRegister(numShards)
+}
    +
+// StorageClient defines an interface for sending a batch of samples to an
+// external timeseries database. Store may be called concurrently from
+// multiple shard goroutines.
+type StorageClient interface {
+	// Store stores the given samples in the remote storage.
+	Store(context.Context, *prompb.WriteRequest) error
+	// Name identifies the remote storage implementation.
+	Name() string
+}
    +
+// QueueManager manages a queue of samples to be sent to the Storage
+// indicated by the provided StorageClient.
+type QueueManager struct {
+	logger log.Logger
+
+	flushDeadline  time.Duration
+	cfg            config.QueueConfig
+	externalLabels model.LabelSet
+	relabelConfigs []*config.RelabelConfig
+	client         StorageClient
+	queueName      string
+	logLimiter     *rate.Limiter
+
+	// shardsMtx guards shards, which is swapped wholesale on reshard.
+	shardsMtx   sync.Mutex
+	shards      *shards
+	numShards   int
+	reshardChan chan int
+	quit        chan struct{}
+	wg          sync.WaitGroup
+
+	// Moving-average rates feeding the dynamic sharding calculation.
+	samplesIn, samplesOut, samplesOutDuration *ewmaRate
+	integralAccumulator                       float64
+}
    +
+// NewQueueManager builds a new QueueManager. A nil logger is replaced by a
+// no-op logger; otherwise the logger is annotated with the queue name. The
+// manager starts with a single shard; call Start to begin sending.
+func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels model.LabelSet, relabelConfigs []*config.RelabelConfig, client StorageClient, flushDeadline time.Duration) *QueueManager {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	} else {
+		logger = log.With(logger, "queue", client.Name())
+	}
+	t := &QueueManager{
+		logger:         logger,
+		flushDeadline:  flushDeadline,
+		cfg:            cfg,
+		externalLabels: externalLabels,
+		relabelConfigs: relabelConfigs,
+		client:         client,
+		queueName:      client.Name(),
+
+		logLimiter:  rate.NewLimiter(logRateLimit, logBurst),
+		numShards:   1,
+		reshardChan: make(chan int),
+		quit:        make(chan struct{}),
+
+		samplesIn:          newEWMARate(ewmaWeight, shardUpdateDuration),
+		samplesOut:         newEWMARate(ewmaWeight, shardUpdateDuration),
+		samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
+	}
+	t.shards = t.newShards(t.numShards)
+	numShards.WithLabelValues(t.queueName).Set(float64(t.numShards))
+	queueCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))
+
+	// Initialise counter labels to zero.
+	sentBatchDuration.WithLabelValues(t.queueName)
+	succeededSamplesTotal.WithLabelValues(t.queueName)
+	failedSamplesTotal.WithLabelValues(t.queueName)
+	droppedSamplesTotal.WithLabelValues(t.queueName)
+
+	return t
+}
    +
+// Append queues a sample to be sent to the remote storage. It drops the
+// sample on the floor if the queue is full.
+// Always returns nil.
+func (t *QueueManager) Append(s *model.Sample) error {
+	// Work on a copy so the caller's sample is never mutated.
+	snew := *s
+	snew.Metric = s.Metric.Clone()
+
+	// Attach external labels without overriding labels already present.
+	for ln, lv := range t.externalLabels {
+		if _, ok := s.Metric[ln]; !ok {
+			snew.Metric[ln] = lv
+		}
+	}
+
+	snew.Metric = model.Metric(
+		relabel.Process(model.LabelSet(snew.Metric), t.relabelConfigs...))
+
+	// Relabelling may drop the sample entirely.
+	if snew.Metric == nil {
+		return nil
+	}
+
+	t.shardsMtx.Lock()
+	enqueued := t.shards.enqueue(&snew)
+	t.shardsMtx.Unlock()
+
+	if enqueued {
+		queueLength.WithLabelValues(t.queueName).Inc()
+	} else {
+		droppedSamplesTotal.WithLabelValues(t.queueName).Inc()
+		// Rate-limited so a persistently full queue doesn't flood the log.
+		if t.logLimiter.Allow() {
+			level.Warn(t.logger).Log("msg", "Remote storage queue full, discarding sample. Multiple subsequent messages of this kind may be suppressed.")
+		}
+	}
+	return nil
+}
    +
    +// NeedsThrottling implements storage.SampleAppender. It will always return
    +// false as a remote storage drops samples on the floor if backlogging instead
    +// of asking for throttling.
    +func (*QueueManager) NeedsThrottling() bool {
    +	return false
    +}
    +
+// Start the queue manager sending samples to the remote storage.
+// Does not block.
+func (t *QueueManager) Start() {
+	// Two background loops: shard-count calculation and resharding.
+	t.wg.Add(2)
+	go t.updateShardsLoop()
+	go t.reshardLoop()
+
+	t.shardsMtx.Lock()
+	defer t.shardsMtx.Unlock()
+	t.shards.start()
+}
    +
+// Stop stops sending samples to the remote storage and waits for pending
+// sends to complete. The background loops are stopped first so no reshard
+// can race with the final shard flush.
+func (t *QueueManager) Stop() {
+	level.Info(t.logger).Log("msg", "Stopping remote storage...")
+	close(t.quit)
+	t.wg.Wait()
+
+	t.shardsMtx.Lock()
+	defer t.shardsMtx.Unlock()
+	t.shards.stop(t.flushDeadline)
+
+	level.Info(t.logger).Log("msg", "Remote storage stopped.")
+}
    +
+// updateShardsLoop recalculates the desired shard count every
+// shardUpdateDuration until the manager is stopped.
+func (t *QueueManager) updateShardsLoop() {
+	defer t.wg.Done()
+
+	ticker := time.NewTicker(shardUpdateDuration)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			t.calculateDesiredShards()
+		case <-t.quit:
+			return
+		}
+	}
+}
    +
    +func (t *QueueManager) calculateDesiredShards() {
    +	t.samplesIn.tick()
    +	t.samplesOut.tick()
    +	t.samplesOutDuration.tick()
    +
    +	// We use the number of incoming samples as a prediction of how much work we
    +	// will need to do next iteration.  We add to this any pending samples
    +	// (received - send) so we can catch up with any backlog. We use the average
    +	// outgoing batch latency to work out how many shards we need.
    +	var (
    +		samplesIn          = t.samplesIn.rate()
    +		samplesOut         = t.samplesOut.rate()
    +		samplesPending     = samplesIn - samplesOut
    +		samplesOutDuration = t.samplesOutDuration.rate()
    +	)
    +
    +	// We use an integral accumulator, like in a PID, to help dampen oscillation.
    +	t.integralAccumulator = t.integralAccumulator + (samplesPending * 0.1)
    +
    +	if samplesOut <= 0 {
    +		return
    +	}
    +
    +	var (
    +		timePerSample = samplesOutDuration / samplesOut
    +		desiredShards = (timePerSample * (samplesIn + samplesPending + t.integralAccumulator)) / float64(time.Second)
    +	)
    +	level.Debug(t.logger).Log("msg", "QueueManager.caclulateDesiredShards",
    +		"samplesIn", samplesIn, "samplesOut", samplesOut,
    +		"samplesPending", samplesPending, "desiredShards", desiredShards)
    +
    +	// Changes in the number of shards must be greater than shardToleranceFraction.
    +	var (
    +		lowerBound = float64(t.numShards) * (1. - shardToleranceFraction)
    +		upperBound = float64(t.numShards) * (1. + shardToleranceFraction)
    +	)
    +	level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop",
    +		"lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound)
    +	if lowerBound <= desiredShards && desiredShards <= upperBound {
    +		return
    +	}
    +
    +	numShards := int(math.Ceil(desiredShards))
    +	if numShards > t.cfg.MaxShards {
    +		numShards = t.cfg.MaxShards
    +	} else if numShards < 1 {
    +		numShards = 1
    +	}
    +	if numShards == t.numShards {
    +		return
    +	}
    +
    +	// Resharding can take some time, and we want this loop
    +	// to stay close to shardUpdateDuration.
    +	select {
    +	case t.reshardChan <- numShards:
    +		level.Info(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", numShards)
    +		t.numShards = numShards
    +	default:
    +		level.Info(t.logger).Log("msg", "Currently resharding, skipping.")
    +	}
    +}
    +
    +func (t *QueueManager) reshardLoop() {
    +	defer t.wg.Done()
    +
    +	for {
    +		select {
    +		case numShards := <-t.reshardChan:
    +			t.reshard(numShards)
    +		case <-t.quit:
    +			return
    +		}
    +	}
    +}
    +
+// reshard swaps in a freshly sized shard set, draining the old one first.
+func (t *QueueManager) reshard(n int) {
+	numShards.WithLabelValues(t.queueName).Set(float64(n))
+
+	t.shardsMtx.Lock()
+	newShards := t.newShards(n)
+	oldShards := t.shards
+	t.shards = newShards
+	t.shardsMtx.Unlock()
+
+	oldShards.stop(t.flushDeadline)
+
+	// We start the newShards after we have stopped (and therefore completely
+	// flushed) the oldShards, to guarantee we only ever deliver samples in
+	// order.
+	newShards.start()
+}
    +
+// shards is a set of sample queues, one per shard, each drained by its own
+// runShard goroutine.
+type shards struct {
+	qm      *QueueManager
+	queues  []chan *model.Sample
+	done    chan struct{} // closed when the last runShard goroutine exits
+	running int32         // number of live runShard goroutines
+	ctx     context.Context
+	cancel  context.CancelFunc
+}
    +
    +func (t *QueueManager) newShards(numShards int) *shards {
    +	queues := make([]chan *model.Sample, numShards)
    +	for i := 0; i < numShards; i++ {
    +		queues[i] = make(chan *model.Sample, t.cfg.Capacity)
    +	}
    +	ctx, cancel := context.WithCancel(context.Background())
    +	s := &shards{
    +		qm:      t,
    +		queues:  queues,
    +		done:    make(chan struct{}),
    +		running: int32(numShards),
    +		ctx:     ctx,
    +		cancel:  cancel,
    +	}
    +	return s
    +}
    +
    +func (s *shards) len() int {
    +	return len(s.queues)
    +}
    +
    +func (s *shards) start() {
    +	for i := 0; i < len(s.queues); i++ {
    +		go s.runShard(i)
    +	}
    +}
    +
    +func (s *shards) stop(deadline time.Duration) {
    +	// Attempt a clean shutdown.
    +	for _, shard := range s.queues {
    +		close(shard)
    +	}
    +	select {
    +	case <-s.done:
    +		return
    +	case <-time.After(deadline):
    +		level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown")
    +	}
    +
    +	// Force an unclean shutdown.
    +	s.cancel()
    +	<-s.done
    +	return
    +}
    +
+// enqueue hashes the sample's metric onto a shard queue and attempts a
+// non-blocking send; it reports false if that shard's queue is full.
+func (s *shards) enqueue(sample *model.Sample) bool {
+	s.qm.samplesIn.incr(1)
+
+	// Fingerprint-based sharding keeps a series pinned to one shard.
+	fp := sample.Metric.FastFingerprint()
+	shard := uint64(fp) % uint64(len(s.queues))
+
+	select {
+	case s.queues[shard] <- sample:
+		return true
+	default:
+		return false
+	}
+}
    +
+// runShard consumes samples from queue i, sending batches of up to
+// MaxSamplesPerSend and flushing partial batches after BatchSendDeadline.
+// It returns when the queue is closed (clean shutdown) or the shard context
+// is cancelled (forced shutdown).
+func (s *shards) runShard(i int) {
+	defer func() {
+		// The last shard to exit signals overall completion via done.
+		if atomic.AddInt32(&s.running, -1) == 0 {
+			close(s.done)
+		}
+	}()
+
+	queue := s.queues[i]
+
+	// Send batches of at most MaxSamplesPerSend samples to the remote storage.
+	// If we have fewer samples than that, flush them out after a deadline
+	// anyways.
+	pendingSamples := model.Samples{}
+
+	timer := time.NewTimer(s.qm.cfg.BatchSendDeadline)
+	stop := func() {
+		if !timer.Stop() {
+			// Drain the channel if the timer already fired.
+			select {
+			case <-timer.C:
+			default:
+			}
+		}
+	}
+	defer stop()
+
+	for {
+		select {
+		case <-s.ctx.Done():
+			return
+
+		case sample, ok := <-queue:
+			if !ok {
+				// Queue closed: flush whatever is pending, then exit.
+				if len(pendingSamples) > 0 {
+					level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", len(pendingSamples))
+					s.sendSamples(pendingSamples)
+					level.Debug(s.qm.logger).Log("msg", "Done flushing.")
+				}
+				return
+			}
+
+			queueLength.WithLabelValues(s.qm.queueName).Dec()
+			pendingSamples = append(pendingSamples, sample)
+
+			if len(pendingSamples) >= s.qm.cfg.MaxSamplesPerSend {
+				s.sendSamples(pendingSamples[:s.qm.cfg.MaxSamplesPerSend])
+				pendingSamples = pendingSamples[s.qm.cfg.MaxSamplesPerSend:]
+
+				stop()
+				timer.Reset(s.qm.cfg.BatchSendDeadline)
+			}
+
+		case <-timer.C:
+			// Deadline reached: flush the partial batch.
+			if len(pendingSamples) > 0 {
+				s.sendSamples(pendingSamples)
+				pendingSamples = pendingSamples[:0]
+			}
+			timer.Reset(s.qm.cfg.BatchSendDeadline)
+		}
+	}
+}
    +
    +func (s *shards) sendSamples(samples model.Samples) {
    +	begin := time.Now()
    +	s.sendSamplesWithBackoff(samples)
    +
    +	// These counters are used to calculate the dynamic sharding, and as such
    +	// should be maintained irrespective of success or failure.
    +	s.qm.samplesOut.incr(int64(len(samples)))
    +	s.qm.samplesOutDuration.incr(int64(time.Since(begin)))
    +}
    +
+// sendSamples to the remote storage with backoff for recoverable errors.
+// Non-recoverable errors abort immediately; after MaxRetries attempts the
+// whole batch is counted as failed.
+func (s *shards) sendSamplesWithBackoff(samples model.Samples) {
+	backoff := s.qm.cfg.MinBackoff
+	for retries := s.qm.cfg.MaxRetries; retries > 0; retries-- {
+		begin := time.Now()
+		req := ToWriteRequest(samples)
+		err := s.qm.client.Store(s.ctx, req)
+
+		sentBatchDuration.WithLabelValues(s.qm.queueName).Observe(time.Since(begin).Seconds())
+		if err == nil {
+			succeededSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
+			return
+		}
+
+		level.Warn(s.qm.logger).Log("msg", "Error sending samples to remote storage", "count", len(samples), "err", err)
+		// Only recoverable errors are retried.
+		if _, ok := err.(recoverableError); !ok {
+			break
+		}
+		// Exponential backoff, capped at MaxBackoff.
+		time.Sleep(backoff)
+		backoff = backoff * 2
+		if backoff > s.qm.cfg.MaxBackoff {
+			backoff = s.qm.cfg.MaxBackoff
+		}
+	}
+
+	failedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
+}
    diff --git a/src/prometheus/storage/remote/queue_manager_test.go b/src/prometheus/storage/remote/queue_manager_test.go
    new file mode 100644
    index 0000000..8216989
    --- /dev/null
    +++ b/src/prometheus/storage/remote/queue_manager_test.go
    @@ -0,0 +1,330 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"context"
    +	"fmt"
    +	"reflect"
    +	"sync"
    +	"sync/atomic"
    +	"testing"
    +	"time"
    +
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/prompb"
    +)
    +
+// defaultFlushDeadline bounds how long Stop may wait for shards to drain.
+const defaultFlushDeadline = 1 * time.Minute
+
+// TestStorageClient records received samples so tests can compare them
+// against an expected set.
+type TestStorageClient struct {
+	receivedSamples map[string][]*prompb.Sample
+	expectedSamples map[string][]*prompb.Sample
+	wg              sync.WaitGroup
+	mtx             sync.Mutex
+}
    +
    +func NewTestStorageClient() *TestStorageClient {
    +	return &TestStorageClient{
    +		receivedSamples: map[string][]*prompb.Sample{},
    +		expectedSamples: map[string][]*prompb.Sample{},
    +	}
    +}
    +
+// expectSamples resets recorded state and registers ss as the samples the
+// client must receive; waitForExpectedSamples blocks until Store has
+// delivered them all.
+func (c *TestStorageClient) expectSamples(ss model.Samples) {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	c.expectedSamples = map[string][]*prompb.Sample{}
+	c.receivedSamples = map[string][]*prompb.Sample{}
+
+	for _, s := range ss {
+		// Key by the canonical label-set string so received samples can be
+		// matched series-by-series.
+		ts := labelProtosToLabels(MetricToLabelProtos(s.Metric)).String()
+		c.expectedSamples[ts] = append(c.expectedSamples[ts], &prompb.Sample{
+			Timestamp: int64(s.Timestamp),
+			Value:     float64(s.Value),
+		})
+	}
+	c.wg.Add(len(ss))
+}
    +
+// waitForExpectedSamples blocks until every expected sample has been stored,
+// then verifies the received samples match per series.
+func (c *TestStorageClient) waitForExpectedSamples(t *testing.T) {
+	c.wg.Wait()
+
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+	for ts, expectedSamples := range c.expectedSamples {
+		if !reflect.DeepEqual(expectedSamples, c.receivedSamples[ts]) {
+			t.Fatalf("%s: Expected %v, got %v", ts, expectedSamples, c.receivedSamples[ts])
+		}
+	}
+}
    +
+// Store implements StorageClient: it records every sample in the request and
+// credits them against the expected-samples wait group.
+func (c *TestStorageClient) Store(_ context.Context, req *prompb.WriteRequest) error {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+	count := 0
+	for _, ts := range req.Timeseries {
+		labels := labelProtosToLabels(ts.Labels).String()
+		for _, sample := range ts.Samples {
+			count++
+			c.receivedSamples[labels] = append(c.receivedSamples[labels], sample)
+		}
+	}
+	c.wg.Add(-count)
+	return nil
+}
    +
    +func (c *TestStorageClient) Name() string {
    +	return "teststorageclient"
    +}
    +
+// TestSampleDelivery checks that samples enqueued before Start are delivered
+// and that overflow beyond the queue capacity is dropped.
+func TestSampleDelivery(t *testing.T) {
+	// Let's create an even number of send batches so we don't run into the
+	// batch timeout case.
+	n := config.DefaultQueueConfig.Capacity * 2
+
+	samples := make(model.Samples, 0, n)
+	for i := 0; i < n; i++ {
+		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i))
+		samples = append(samples, &model.Sample{
+			Metric: model.Metric{
+				model.MetricNameLabel: name,
+			},
+			Value: model.SampleValue(i),
+		})
+	}
+
+	c := NewTestStorageClient()
+	c.expectSamples(samples[:len(samples)/2])
+
+	cfg := config.DefaultQueueConfig
+	cfg.MaxShards = 1
+	m := NewQueueManager(nil, cfg, nil, nil, c, defaultFlushDeadline)
+
+	// These should be received by the client.
+	for _, s := range samples[:len(samples)/2] {
+		m.Append(s)
+	}
+	// These will be dropped because the queue is full.
+	for _, s := range samples[len(samples)/2:] {
+		m.Append(s)
+	}
+	m.Start()
+	defer m.Stop()
+
+	c.waitForExpectedSamples(t)
+}
    +
+// TestSampleDeliveryTimeout checks that a partial batch is flushed after
+// BatchSendDeadline, twice in a row.
+func TestSampleDeliveryTimeout(t *testing.T) {
+	// Let's send one less sample than batch size, and wait the timeout duration
+	n := config.DefaultQueueConfig.Capacity - 1
+
+	samples := make(model.Samples, 0, n)
+	for i := 0; i < n; i++ {
+		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i))
+		samples = append(samples, &model.Sample{
+			Metric: model.Metric{
+				model.MetricNameLabel: name,
+			},
+			Value: model.SampleValue(i),
+		})
+	}
+
+	c := NewTestStorageClient()
+
+	cfg := config.DefaultQueueConfig
+	cfg.MaxShards = 1
+	cfg.BatchSendDeadline = 100 * time.Millisecond
+	m := NewQueueManager(nil, cfg, nil, nil, c, defaultFlushDeadline)
+	m.Start()
+	defer m.Stop()
+
+	// Send the samples twice, waiting for the samples in the meantime.
+	c.expectSamples(samples)
+	for _, s := range samples {
+		m.Append(s)
+	}
+	c.waitForExpectedSamples(t)
+
+	c.expectSamples(samples)
+	for _, s := range samples {
+		m.Append(s)
+	}
+	c.waitForExpectedSamples(t)
+}
    +
+// TestSampleDeliveryOrder interleaves ts distinct series and checks that all
+// their samples are delivered (per-series order is compared by
+// waitForExpectedSamples).
+func TestSampleDeliveryOrder(t *testing.T) {
+	ts := 10
+	n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
+
+	samples := make(model.Samples, 0, n)
+	for i := 0; i < n; i++ {
+		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i%ts))
+		samples = append(samples, &model.Sample{
+			Metric: model.Metric{
+				model.MetricNameLabel: name,
+			},
+			Value:     model.SampleValue(i),
+			Timestamp: model.Time(i),
+		})
+	}
+
+	c := NewTestStorageClient()
+	c.expectSamples(samples)
+	m := NewQueueManager(nil, config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
+
+	// These should be received by the client.
+	for _, s := range samples {
+		m.Append(s)
+	}
+	m.Start()
+	defer m.Stop()
+
+	c.waitForExpectedSamples(t)
+}
    +
+// TestBlockingStorageClient is a queue_manager StorageClient which will block
+// on any calls to Store(), until the `block` channel is closed, at which point
+// the `numCalls` property will contain a count of how many times Store() was
+// called.
+type TestBlockingStorageClient struct {
+	numCalls uint64 // accessed atomically
+	block    chan bool
+}
    +
    +func NewTestBlockedStorageClient() *TestBlockingStorageClient {
    +	return &TestBlockingStorageClient{
    +		block:    make(chan bool),
    +		numCalls: 0,
    +	}
    +}
    +
+// Store counts the invocation, then blocks until unlock is called or the
+// context is cancelled. Always returns nil.
+func (c *TestBlockingStorageClient) Store(ctx context.Context, _ *prompb.WriteRequest) error {
+	atomic.AddUint64(&c.numCalls, 1)
+	select {
+	case <-c.block:
+	case <-ctx.Done():
+	}
+	return nil
+}
    +
+// NumCalls returns how many times Store has been invoked so far.
+func (c *TestBlockingStorageClient) NumCalls() uint64 {
+	return atomic.LoadUint64(&c.numCalls)
+}
    +
+// unlock releases every in-flight and future Store call.
+func (c *TestBlockingStorageClient) unlock() {
+	close(c.block)
+}
    +
    +func (c *TestBlockingStorageClient) Name() string {
    +	return "testblockingstorageclient"
    +}
    +
    +func (t *QueueManager) queueLen() int {
    +	t.shardsMtx.Lock()
    +	defer t.shardsMtx.Unlock()
    +	queueLength := 0
    +	for _, shard := range t.shards.queues {
    +		queueLength += len(shard)
    +	}
    +	return queueLength
    +}
    +
+// TestSpawnNotMoreThanMaxConcurrentSendsGoroutines verifies that with a
+// single shard only one concurrent Store call is made even with a full
+// queue's worth of pending samples.
+func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
+	// Our goal is to fully empty the queue:
+	// `MaxSamplesPerSend*Shards` samples should be consumed by the
+	// per-shard goroutines, and then another `MaxSamplesPerSend`
+	// should be left on the queue.
+	n := config.DefaultQueueConfig.MaxSamplesPerSend * 2
+
+	samples := make(model.Samples, 0, n)
+	for i := 0; i < n; i++ {
+		name := model.LabelValue(fmt.Sprintf("test_metric_%d", i))
+		samples = append(samples, &model.Sample{
+			Metric: model.Metric{
+				model.MetricNameLabel: name,
+			},
+			Value: model.SampleValue(i),
+		})
+	}
+
+	c := NewTestBlockedStorageClient()
+	cfg := config.DefaultQueueConfig
+	cfg.MaxShards = 1
+	cfg.Capacity = n
+	m := NewQueueManager(nil, cfg, nil, nil, c, defaultFlushDeadline)
+
+	m.Start()
+
+	defer func() {
+		// Unblock the client before stopping, or Stop would wait forever.
+		c.unlock()
+		m.Stop()
+	}()
+
+	for _, s := range samples {
+		m.Append(s)
+	}
+
+	// Wait until the runShard() loops drain the queue.  If things went right, it
+	// should then immediately block in sendSamples(), but, in case of error,
+	// it would spawn too many goroutines, and thus we'd see more calls to
+	// client.Store()
+	//
+	// The timed wait is maybe non-ideal, but, in order to verify that we're
+	// not spawning too many concurrent goroutines, we have to wait on the
+	// Run() loop to consume a specific number of elements from the
+	// queue... and it doesn't signal that in any obvious way, except by
+	// draining the queue.  We cap the waiting at 1 second -- that should give
+	// plenty of time, and keeps the failure fairly quick if we're not draining
+	// the queue properly.
+	for i := 0; i < 100 && m.queueLen() > 0; i++ {
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	if m.queueLen() != config.DefaultQueueConfig.MaxSamplesPerSend {
+		t.Fatalf("Failed to drain QueueManager queue, %d elements left",
+			m.queueLen(),
+		)
+	}
+
+	numCalls := c.NumCalls()
+	if numCalls != uint64(1) {
+		t.Errorf("Saw %d concurrent sends, expected 1", numCalls)
+	}
+}
    +
    +func TestShutdown(t *testing.T) {
    +	deadline := 10 * time.Second
    +	c := NewTestBlockedStorageClient()
    +	m := NewQueueManager(nil, config.DefaultQueueConfig, nil, nil, c, deadline)
    +	for i := 0; i < config.DefaultQueueConfig.MaxSamplesPerSend; i++ {
    +		m.Append(&model.Sample{
    +			Metric: model.Metric{
    +				model.MetricNameLabel: model.LabelValue(fmt.Sprintf("test_metric_%d", i)),
    +			},
    +			Value:     model.SampleValue(i),
    +			Timestamp: model.Time(i),
    +		})
    +	}
    +	m.Start()
    +
    +	start := time.Now()
    +	m.Stop()
    +	duration := time.Now().Sub(start)
    +	if duration > deadline+(deadline/10) {
    +		t.Errorf("Took too long to shutdown: %s > %s", duration, deadline)
    +	}
    +}
    diff --git a/src/prometheus/storage/remote/read.go b/src/prometheus/storage/remote/read.go
    new file mode 100644
    index 0000000..c99a8a9
    --- /dev/null
    +++ b/src/prometheus/storage/remote/read.go
    @@ -0,0 +1,226 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"context"
    +
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
    +// QueryableClient returns a storage.Queryable which queries the given
    +// Client to select series sets.
    +func QueryableClient(c *Client) storage.Queryable {
    +	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +		return &querier{
    +			ctx:    ctx,
    +			mint:   mint,
    +			maxt:   maxt,
    +			client: c,
    +		}, nil
    +	})
    +}
    +
// querier is an adapter to make a Client usable as a storage.Querier.
type querier struct {
	ctx        context.Context // request-scoped context handed to Client.Read
	mint, maxt int64           // inclusive query time range for ToQuery
	client     *Client         // remote read endpoint
}
    +
    +// Select implements storage.Querier and uses the given matchers to read series
    +// sets from the Client.
    +func (q *querier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) {
    +	query, err := ToQuery(q.mint, q.maxt, matchers, p)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	res, err := q.client.Read(q.ctx, query)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return FromQueryResult(res), nil
    +}
    +
// LabelValues implements storage.Querier and is a noop: it always returns
// (nil, nil) regardless of the label name.
func (q *querier) LabelValues(name string) ([]string, error) {
	// TODO implement?
	return nil, nil
}
    +
// Close implements storage.Querier and is a noop; the underlying Client's
// lifetime is managed elsewhere.
func (q *querier) Close() error {
	return nil
}
    +
    +// ExternablLabelsHandler returns a storage.Queryable which creates a
    +// externalLabelsQuerier.
    +func ExternablLabelsHandler(next storage.Queryable, externalLabels model.LabelSet) storage.Queryable {
    +	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +		q, err := next.Querier(ctx, mint, maxt)
    +		if err != nil {
    +			return nil, err
    +		}
    +		return &externalLabelsQuerier{Querier: q, externalLabels: externalLabels}, nil
    +	})
    +}
    +
// externalLabelsQuerier is a querier which ensures that Select() results match
// the configured external labels.
type externalLabelsQuerier struct {
	storage.Querier // wrapped querier; all methods except Select pass through

	// externalLabels are added as equality matchers on Select and stripped
	// from the returned series.
	externalLabels model.LabelSet
}
    +
    +// Select adds equality matchers for all external labels to the list of matchers
    +// before calling the wrapped storage.Queryable. The added external labels are
    +// removed from the returned series sets.
    +func (q externalLabelsQuerier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) {
    +	m, added := q.addExternalLabels(matchers)
    +	s, err := q.Querier.Select(p, m...)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return newSeriesSetFilter(s, added), nil
    +}
    +
    +// PreferLocalStorageFilter returns a QueryableFunc which creates a NoopQuerier
    +// if requested timeframe can be answered completely by the local TSDB, and
    +// reduces maxt if the timeframe can be partially answered by TSDB.
    +func PreferLocalStorageFilter(next storage.Queryable, cb startTimeCallback) storage.Queryable {
    +	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +		localStartTime, err := cb()
    +		if err != nil {
    +			return nil, err
    +		}
    +		cmaxt := maxt
    +		// Avoid queries whose timerange is later than the first timestamp in local DB.
    +		if mint > localStartTime {
    +			return storage.NoopQuerier(), nil
    +		}
    +		// Query only samples older than the first timestamp in local DB.
    +		if maxt > localStartTime {
    +			cmaxt = localStartTime
    +		}
    +		return next.Querier(ctx, mint, cmaxt)
    +	})
    +}
    +
    +// RequiredMatchersFilter returns a storage.Queryable which creates a
    +// requiredMatchersQuerier.
    +func RequiredMatchersFilter(next storage.Queryable, required []*labels.Matcher) storage.Queryable {
    +	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +		q, err := next.Querier(ctx, mint, maxt)
    +		if err != nil {
    +			return nil, err
    +		}
    +		return &requiredMatchersQuerier{Querier: q, requiredMatchers: required}, nil
    +	})
    +}
    +
// requiredMatchersQuerier wraps a storage.Querier and requires Select() calls
// to match the given labelSet.
type requiredMatchersQuerier struct {
	storage.Querier // wrapped querier; consulted only when all matchers are present

	// requiredMatchers must each have an equal user-supplied matcher in a
	// Select() call, otherwise the call returns a NoopSeriesSet. Note this
	// slice may be shared by several queriers (see RequiredMatchersFilter).
	requiredMatchers []*labels.Matcher
}
    +
    +// Select returns a NoopSeriesSet if the given matchers don't match the label
    +// set of the requiredMatchersQuerier. Otherwise it'll call the wrapped querier.
    +func (q requiredMatchersQuerier) Select(p *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) {
    +	ms := q.requiredMatchers
    +	for _, m := range matchers {
    +		for i, r := range ms {
    +			if m.Type == labels.MatchEqual && m.Name == r.Name && m.Value == r.Value {
    +				ms = append(ms[:i], ms[i+1:]...)
    +				break
    +			}
    +		}
    +		if len(ms) == 0 {
    +			break
    +		}
    +	}
    +	if len(ms) > 0 {
    +		return storage.NoopSeriesSet(), nil
    +	}
    +	return q.Querier.Select(p, matchers...)
    +}
    +
    +// addExternalLabels adds matchers for each external label. External labels
    +// that already have a corresponding user-supplied matcher are skipped, as we
    +// assume that the user explicitly wants to select a different value for them.
    +// We return the new set of matchers, along with a map of labels for which
    +// matchers were added, so that these can later be removed from the result
    +// time series again.
    +func (q externalLabelsQuerier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, model.LabelSet) {
    +	el := make(model.LabelSet, len(q.externalLabels))
    +	for k, v := range q.externalLabels {
    +		el[k] = v
    +	}
    +	for _, m := range ms {
    +		if _, ok := el[model.LabelName(m.Name)]; ok {
    +			delete(el, model.LabelName(m.Name))
    +		}
    +	}
    +	for k, v := range el {
    +		m, err := labels.NewMatcher(labels.MatchEqual, string(k), string(v))
    +		if err != nil {
    +			panic(err)
    +		}
    +		ms = append(ms, m)
    +	}
    +	return ms, el
    +}
    +
    +func newSeriesSetFilter(ss storage.SeriesSet, toFilter model.LabelSet) storage.SeriesSet {
    +	return &seriesSetFilter{
    +		SeriesSet: ss,
    +		toFilter:  toFilter,
    +	}
    +}
    +
// seriesSetFilter wraps a SeriesSet and removes the toFilter labels from
// each series returned by At().
type seriesSetFilter struct {
	storage.SeriesSet
	toFilter model.LabelSet // label names to strip from every series
}
    +
    +func (ssf seriesSetFilter) At() storage.Series {
    +	return seriesFilter{
    +		Series:   ssf.SeriesSet.At(),
    +		toFilter: ssf.toFilter,
    +	}
    +}
    +
// seriesFilter wraps a single Series and removes the toFilter labels from
// its label set.
type seriesFilter struct {
	storage.Series
	toFilter model.LabelSet // label names removed in Labels()
}
    +
    +func (sf seriesFilter) Labels() labels.Labels {
    +	labels := sf.Series.Labels()
    +	for i := 0; i < len(labels); {
    +		if _, ok := sf.toFilter[model.LabelName(labels[i].Name)]; ok {
    +			labels = labels[:i+copy(labels[i:], labels[i+1:])]
    +			continue
    +		}
    +		i++
    +	}
    +	return labels
    +}
    diff --git a/src/prometheus/storage/remote/read_test.go b/src/prometheus/storage/remote/read_test.go
    new file mode 100644
    index 0000000..537f605
    --- /dev/null
    +++ b/src/prometheus/storage/remote/read_test.go
    @@ -0,0 +1,327 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"context"
    +	"reflect"
    +	"sort"
    +	"testing"
    +
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/prompb"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
    +func mustNewLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher {
    +	m, err := labels.NewMatcher(mt, name, val)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return m
    +}
    +
    +func TestExternalLabelsQuerierSelect(t *testing.T) {
    +	matchers := []*labels.Matcher{
    +		mustNewLabelMatcher(labels.MatchEqual, "job", "api-server"),
    +	}
    +	q := &externalLabelsQuerier{
    +		Querier:        mockQuerier{},
    +		externalLabels: model.LabelSet{"region": "europe"},
    +	}
    +	want := newSeriesSetFilter(mockSeriesSet{}, q.externalLabels)
    +	have, err := q.Select(nil, matchers...)
    +	if err != nil {
    +		t.Error(err)
    +	}
    +	if !reflect.DeepEqual(want, have) {
    +		t.Errorf("expected series set %+v, got %+v", want, have)
    +	}
    +}
    +
    +func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
    +	tests := []struct {
    +		el          model.LabelSet
    +		inMatchers  []*labels.Matcher
    +		outMatchers []*labels.Matcher
    +		added       model.LabelSet
    +	}{
    +		{
    +			el: model.LabelSet{},
    +			inMatchers: []*labels.Matcher{
    +				mustNewLabelMatcher(labels.MatchEqual, "job", "api-server"),
    +			},
    +			outMatchers: []*labels.Matcher{
    +				mustNewLabelMatcher(labels.MatchEqual, "job", "api-server"),
    +			},
    +			added: model.LabelSet{},
    +		},
    +		{
    +			el: model.LabelSet{"region": "europe", "dc": "berlin-01"},
    +			inMatchers: []*labels.Matcher{
    +				mustNewLabelMatcher(labels.MatchEqual, "job", "api-server"),
    +			},
    +			outMatchers: []*labels.Matcher{
    +				mustNewLabelMatcher(labels.MatchEqual, "job", "api-server"),
    +				mustNewLabelMatcher(labels.MatchEqual, "region", "europe"),
    +				mustNewLabelMatcher(labels.MatchEqual, "dc", "berlin-01"),
    +			},
    +			added: model.LabelSet{"region": "europe", "dc": "berlin-01"},
    +		},
    +		{
    +			el: model.LabelSet{"region": "europe", "dc": "berlin-01"},
    +			inMatchers: []*labels.Matcher{
    +				mustNewLabelMatcher(labels.MatchEqual, "job", "api-server"),
    +				mustNewLabelMatcher(labels.MatchEqual, "dc", "munich-02"),
    +			},
    +			outMatchers: []*labels.Matcher{
    +				mustNewLabelMatcher(labels.MatchEqual, "job", "api-server"),
    +				mustNewLabelMatcher(labels.MatchEqual, "region", "europe"),
    +				mustNewLabelMatcher(labels.MatchEqual, "dc", "munich-02"),
    +			},
    +			added: model.LabelSet{"region": "europe"},
    +		},
    +	}
    +
    +	for i, test := range tests {
    +		q := &externalLabelsQuerier{Querier: mockQuerier{}, externalLabels: test.el}
    +		matchers, added := q.addExternalLabels(test.inMatchers)
    +
    +		sort.Slice(test.outMatchers, func(i, j int) bool { return test.outMatchers[i].Name < test.outMatchers[j].Name })
    +		sort.Slice(matchers, func(i, j int) bool { return matchers[i].Name < matchers[j].Name })
    +
    +		if !reflect.DeepEqual(matchers, test.outMatchers) {
    +			t.Fatalf("%d. unexpected matchers; want %v, got %v", i, test.outMatchers, matchers)
    +		}
    +		if !reflect.DeepEqual(added, test.added) {
    +			t.Fatalf("%d. unexpected added labels; want %v, got %v", i, test.added, added)
    +		}
    +	}
    +}
    +
    +func TestSeriesSetFilter(t *testing.T) {
    +	tests := []struct {
    +		in       *prompb.QueryResult
    +		toRemove model.LabelSet
    +
    +		expected *prompb.QueryResult
    +	}{
    +		{
    +			toRemove: model.LabelSet{"foo": "bar"},
    +			in: &prompb.QueryResult{
    +				Timeseries: []*prompb.TimeSeries{
    +					{Labels: labelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b")), Samples: []*prompb.Sample{}},
    +				},
    +			},
    +			expected: &prompb.QueryResult{
    +				Timeseries: []*prompb.TimeSeries{
    +					{Labels: labelsToLabelsProto(labels.FromStrings("a", "b")), Samples: []*prompb.Sample{}},
    +				},
    +			},
    +		},
    +	}
    +
    +	for i, tc := range tests {
    +		filtered := newSeriesSetFilter(FromQueryResult(tc.in), tc.toRemove)
    +		have, err := ToQueryResult(filtered)
    +		if err != nil {
    +			t.Fatal(err)
    +		}
    +
    +		if !reflect.DeepEqual(have, tc.expected) {
    +			t.Fatalf("%d. unexpected labels; want %v, got %v", i, tc.expected, have)
    +		}
    +	}
    +}
    +
// mockQuerier is a comparable test double; the embedded storage.Querier is
// left nil, so calling any method other than the Select override below
// would panic.
type mockQuerier struct {
	ctx        context.Context // recorded for equality checks in tests
	mint, maxt int64           // recorded query range

	storage.Querier
}
    +
// mockSeriesSet is an empty, comparable SeriesSet stand-in; the embedded
// interface is nil, so its methods must not be called.
type mockSeriesSet struct {
	storage.SeriesSet
}
    +
// Select always returns an empty mockSeriesSet, ignoring its arguments.
func (mockQuerier) Select(*storage.SelectParams, ...*labels.Matcher) (storage.SeriesSet, error) {
	return mockSeriesSet{}, nil
}
    +
    +func TestPreferLocalStorageFilter(t *testing.T) {
    +	ctx := context.Background()
    +
    +	tests := []struct {
    +		localStartTime int64
    +		mint           int64
    +		maxt           int64
    +		querier        storage.Querier
    +	}{
    +		{
    +			localStartTime: int64(100),
    +			mint:           int64(0),
    +			maxt:           int64(50),
    +			querier:        mockQuerier{ctx: ctx, mint: 0, maxt: 50},
    +		},
    +		{
    +			localStartTime: int64(20),
    +			mint:           int64(0),
    +			maxt:           int64(50),
    +			querier:        mockQuerier{ctx: ctx, mint: 0, maxt: 20},
    +		},
    +		{
    +			localStartTime: int64(20),
    +			mint:           int64(30),
    +			maxt:           int64(50),
    +			querier:        storage.NoopQuerier(),
    +		},
    +	}
    +
    +	for i, test := range tests {
    +		f := PreferLocalStorageFilter(
    +			storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +				return mockQuerier{ctx: ctx, mint: mint, maxt: maxt}, nil
    +			}),
    +			func() (int64, error) { return test.localStartTime, nil },
    +		)
    +
    +		q, err := f.Querier(ctx, test.mint, test.maxt)
    +		if err != nil {
    +			t.Fatal(err)
    +		}
    +
    +		if test.querier != q {
    +			t.Errorf("%d. expected quierer %+v, got %+v", i, test.querier, q)
    +		}
    +	}
    +}
    +
    +func TestRequiredMatchersFilter(t *testing.T) {
    +	ctx := context.Background()
    +
    +	f := RequiredMatchersFilter(
    +		storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +			return mockQuerier{ctx: ctx, mint: mint, maxt: maxt}, nil
    +		}),
    +		[]*labels.Matcher{mustNewLabelMatcher(labels.MatchEqual, "special", "label")},
    +	)
    +
    +	want := &requiredMatchersQuerier{
    +		Querier:          mockQuerier{ctx: ctx, mint: 0, maxt: 50},
    +		requiredMatchers: []*labels.Matcher{mustNewLabelMatcher(labels.MatchEqual, "special", "label")},
    +	}
    +	have, err := f.Querier(ctx, 0, 50)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +
    +	if !reflect.DeepEqual(want, have) {
    +		t.Errorf("expected quierer %+v, got %+v", want, have)
    +	}
    +}
    +
// TestRequiredLabelsQuerierSelect checks that Select returns a NoopSeriesSet
// unless every required matcher has an exactly-equal user-supplied matcher.
func TestRequiredLabelsQuerierSelect(t *testing.T) {
	tests := []struct {
		requiredMatchers []*labels.Matcher // matchers the querier demands
		matchers         []*labels.Matcher // matchers supplied by the caller
		seriesSet        storage.SeriesSet // expected Select result
	}{
		{
			// No requirements: always forwarded to the wrapped querier.
			requiredMatchers: []*labels.Matcher{},
			matchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
			},
			seriesSet: mockSeriesSet{},
		},
		{
			requiredMatchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
			},
			matchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
			},
			seriesSet: mockSeriesSet{},
		},
		{
			// A regexp matcher does not satisfy an equality requirement.
			requiredMatchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
			},
			matchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchRegexp, "special", "label"),
			},
			seriesSet: storage.NoopSeriesSet(),
		},
		{
			// Same name but different value does not satisfy the requirement.
			requiredMatchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
			},
			matchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "different"),
			},
			seriesSet: storage.NoopSeriesSet(),
		},
		{
			// Extra user matchers beyond the required ones are fine.
			requiredMatchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
			},
			matchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
				mustNewLabelMatcher(labels.MatchEqual, "foo", "bar"),
			},
			seriesSet: mockSeriesSet{},
		},
		{
			// One of two requirements unmet -> noop.
			requiredMatchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
				mustNewLabelMatcher(labels.MatchEqual, "foo", "bar"),
			},
			matchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
				mustNewLabelMatcher(labels.MatchEqual, "foo", "baz"),
			},
			seriesSet: storage.NoopSeriesSet(),
		},
		{
			requiredMatchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
				mustNewLabelMatcher(labels.MatchEqual, "foo", "bar"),
			},
			matchers: []*labels.Matcher{
				mustNewLabelMatcher(labels.MatchEqual, "special", "label"),
				mustNewLabelMatcher(labels.MatchEqual, "foo", "bar"),
			},
			seriesSet: mockSeriesSet{},
		},
	}

	for i, test := range tests {
		q := &requiredMatchersQuerier{
			Querier:          mockQuerier{},
			requiredMatchers: test.requiredMatchers,
		}

		have, err := q.Select(nil, test.matchers...)
		if err != nil {
			t.Error(err)
		}
		if want := test.seriesSet; want != have {
			t.Errorf("%d. expected series set %+v, got %+v", i, want, have)
		}
		// NOTE(review): this guard compares q.requiredMatchers against
		// test.requiredMatchers, but both names refer to the very same slice,
		// so reflect.DeepEqual can never fail here even when Select mutates
		// the shared backing array. To be effective, the check would need a
		// deep copy of the matchers taken before Select is called.
		if want, have := test.requiredMatchers, q.requiredMatchers; !reflect.DeepEqual(want, have) {
			t.Errorf("%d. requiredMatchersQuerier.Select() has modified the matchers", i)
		}
	}
}
    diff --git a/src/prometheus/storage/remote/storage.go b/src/prometheus/storage/remote/storage.go
    new file mode 100644
    index 0000000..820d0ff
    --- /dev/null
    +++ b/src/prometheus/storage/remote/storage.go
    @@ -0,0 +1,168 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"context"
    +	"sync"
    +	"time"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/config"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
// startTimeCallback is a callback func that returns the oldest timestamp
// stored in a storage.
type startTimeCallback func() (int64, error)

// Storage represents all the remote read and write endpoints.  It implements
// storage.Storage.
type Storage struct {
	logger log.Logger
	mtx    sync.RWMutex // guards queues and queryables

	// For writes: one QueueManager per configured remote write endpoint.
	queues []*QueueManager

	// For reads: one (possibly wrapped) Queryable per remote read endpoint.
	queryables             []storage.Queryable
	localStartTimeCallback startTimeCallback // supplies the local TSDB's oldest timestamp
	flushDeadline          time.Duration     // handed to each QueueManager on creation
}
    +
    +// NewStorage returns a remote.Storage.
    +func NewStorage(l log.Logger, stCallback startTimeCallback, flushDeadline time.Duration) *Storage {
    +	if l == nil {
    +		l = log.NewNopLogger()
    +	}
    +	return &Storage{
    +		logger:                 l,
    +		localStartTimeCallback: stCallback,
    +		flushDeadline:          flushDeadline,
    +	}
    +}
    +
// ApplyConfig updates the state as the new config requires: every write
// queue is stopped and recreated (even unchanged ones), and the read
// queryables are rebuilt from scratch.
func (s *Storage) ApplyConfig(conf *config.Config) error {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	// Update write queues

	newQueues := []*QueueManager{}
	// TODO: we should only stop & recreate queues which have changes,
	// as this can be quite disruptive.
	for i, rwConf := range conf.RemoteWriteConfigs {
		c, err := NewClient(i, &ClientConfig{
			URL:              rwConf.URL,
			Timeout:          rwConf.RemoteTimeout,
			HTTPClientConfig: rwConf.HTTPClientConfig,
		})
		if err != nil {
			return err
		}
		newQueues = append(newQueues, NewQueueManager(
			s.logger,
			rwConf.QueueConfig,
			conf.GlobalConfig.ExternalLabels,
			rwConf.WriteRelabelConfigs,
			c,
			s.flushDeadline,
		))
	}

	// The old queues are stopped only after every new client built
	// successfully, so an invalid config leaves the previous queues running.
	for _, q := range s.queues {
		q.Stop()
	}

	s.queues = newQueues
	for _, q := range s.queues {
		q.Start()
	}

	// Update read clients

	s.queryables = make([]storage.Queryable, 0, len(conf.RemoteReadConfigs))
	for i, rrConf := range conf.RemoteReadConfigs {
		c, err := NewClient(i, &ClientConfig{
			URL:              rrConf.URL,
			Timeout:          rrConf.RemoteTimeout,
			HTTPClientConfig: rrConf.HTTPClientConfig,
		})
		if err != nil {
			return err
		}

		// Wrap the raw client: external-label handling always, then the
		// optional required-matchers filter, then the prefer-local filter
		// unless the endpoint is configured to serve recent data too.
		q := QueryableClient(c)
		q = ExternablLabelsHandler(q, conf.GlobalConfig.ExternalLabels)
		if len(rrConf.RequiredMatchers) > 0 {
			q = RequiredMatchersFilter(q, labelsToEqualityMatchers(rrConf.RequiredMatchers))
		}
		if !rrConf.ReadRecent {
			q = PreferLocalStorageFilter(q, s.localStartTimeCallback)
		}
		s.queryables = append(s.queryables, q)
	}

	return nil
}
    +
// StartTime implements the Storage interface. It returns model.Latest,
// i.e. remote storage never constrains the lower bound of queries here.
func (s *Storage) StartTime() (int64, error) {
	return int64(model.Latest), nil
}
    +
    +// Querier returns a storage.MergeQuerier combining the remote client queriers
    +// of each configured remote read endpoint.
    +func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +	s.mtx.Lock()
    +	queryables := s.queryables
    +	s.mtx.Unlock()
    +
    +	queriers := make([]storage.Querier, 0, len(queryables))
    +	for _, queryable := range queryables {
    +		q, err := queryable.Querier(ctx, mint, maxt)
    +		if err != nil {
    +			return nil, err
    +		}
    +		queriers = append(queriers, q)
    +	}
    +	return storage.NewMergeQuerier(queriers), nil
    +}
    +
    +// Close the background processing of the storage queues.
    +func (s *Storage) Close() error {
    +	s.mtx.Lock()
    +	defer s.mtx.Unlock()
    +
    +	for _, q := range s.queues {
    +		q.Stop()
    +	}
    +
    +	return nil
    +}
    +
    +func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher {
    +	ms := make([]*labels.Matcher, 0, len(ls))
    +	for k, v := range ls {
    +		ms = append(ms, &labels.Matcher{
    +			Type:  labels.MatchEqual,
    +			Name:  string(k),
    +			Value: string(v),
    +		})
    +	}
    +	return ms
    +}
    diff --git a/src/prometheus/storage/remote/write.go b/src/prometheus/storage/remote/write.go
    new file mode 100644
    index 0000000..7427b3b
    --- /dev/null
    +++ b/src/prometheus/storage/remote/write.go
    @@ -0,0 +1,55 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package remote
    +
    +import (
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/storage"
    +)
    +
// Appender implements scrape.Appendable. The Storage itself acts as the
// appender: samples Add()ed to it are fanned out to every write queue.
func (s *Storage) Appender() (storage.Appender, error) {
	return s, nil
}
    +
    +// Add implements storage.Appender.
    +func (s *Storage) Add(l labels.Labels, t int64, v float64) (uint64, error) {
    +	s.mtx.RLock()
    +	defer s.mtx.RUnlock()
    +	for _, q := range s.queues {
    +		q.Append(&model.Sample{
    +			Metric:    labelsToMetric(l),
    +			Timestamp: model.Time(t),
    +			Value:     model.SampleValue(v),
    +		})
    +	}
    +	return 0, nil
    +}
    +
// AddFast implements storage.Appender; the reference argument is ignored
// and the call simply delegates to Add.
func (s *Storage) AddFast(l labels.Labels, _ uint64, t int64, v float64) error {
	_, err := s.Add(l, t, v)
	return err
}
    +
// Commit implements storage.Appender and is a noop: samples are shipped
// asynchronously by the queues, so there is nothing to commit.
func (*Storage) Commit() error {
	return nil
}
    +
// Rollback implements storage.Appender and is a noop: already-appended
// samples cannot be withdrawn from the queues.
func (*Storage) Rollback() error {
	return nil
}
    diff --git a/src/prometheus/storage/tsdb/tsdb.go b/src/prometheus/storage/tsdb/tsdb.go
    new file mode 100644
    index 0000000..92e8cad
    --- /dev/null
    +++ b/src/prometheus/storage/tsdb/tsdb.go
    @@ -0,0 +1,292 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package tsdb
    +
    +import (
    +	"context"
    +	"sync"
    +	"time"
    +	"unsafe"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/pkg/errors"
    +	"github.com/prometheus/client_golang/prometheus"
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/storage"
    +	"github.com/prometheus/tsdb"
    +	tsdbLabels "github.com/prometheus/tsdb/labels"
    +)
    +
// ErrNotReady is returned if the underlying storage is not ready yet,
// i.e. ReadyStorage.Set has not been called.
var ErrNotReady = errors.New("TSDB not ready")
    +
// ReadyStorage implements the Storage interface while allowing to set the actual
// storage at a later point in time. Until Set is called, all operations
// report ErrNotReady.
type ReadyStorage struct {
	mtx sync.RWMutex
	a   *adapter // nil until Set() is called
}
    +
    +// Set the storage.
    +func (s *ReadyStorage) Set(db *tsdb.DB, startTimeMargin int64) {
    +	s.mtx.Lock()
    +	defer s.mtx.Unlock()
    +
    +	s.a = &adapter{db: db, startTimeMargin: startTimeMargin}
    +}
    +
    +// Get the storage.
    +func (s *ReadyStorage) Get() *tsdb.DB {
    +	if x := s.get(); x != nil {
    +		return x.db
    +	}
    +	return nil
    +}
    +
    +func (s *ReadyStorage) get() *adapter {
    +	s.mtx.RLock()
    +	x := s.a
    +	s.mtx.RUnlock()
    +	return x
    +}
    +
    +// StartTime implements the Storage interface.
    +func (s *ReadyStorage) StartTime() (int64, error) {
    +	if x := s.get(); x != nil {
    +		return x.StartTime()
    +	}
    +	return int64(model.Latest), ErrNotReady
    +}
    +
    +// Querier implements the Storage interface.
    +func (s *ReadyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
    +	if x := s.get(); x != nil {
    +		return x.Querier(ctx, mint, maxt)
    +	}
    +	return nil, ErrNotReady
    +}
    +
    +// Appender implements the Storage interface.
    +func (s *ReadyStorage) Appender() (storage.Appender, error) {
    +	if x := s.get(); x != nil {
    +		return x.Appender()
    +	}
    +	return nil, ErrNotReady
    +}
    +
    +// Close implements the Storage interface.
    +func (s *ReadyStorage) Close() error {
    +	if x := s.Get(); x != nil {
    +		return x.Close()
    +	}
    +	return nil
    +}
    +
    +// Adapter returns an adapter as storage.Storage.
    +func Adapter(db *tsdb.DB, startTimeMargin int64) storage.Storage {
    +	return &adapter{db: db, startTimeMargin: startTimeMargin}
    +}
    +
    +// adapter implements a storage.Storage around TSDB.
    +type adapter struct {
    +	db              *tsdb.DB
    +	startTimeMargin int64
    +}
    +
    +// Options of the DB storage.
    +type Options struct {
    +	// The interval at which the write ahead log is flushed to disc.
    +	WALFlushInterval time.Duration
    +
    +	// The timestamp range of head blocks after which they get persisted.
    +	// It's the minimum duration of any persisted block.
    +	MinBlockDuration model.Duration
    +
    +	// The maximum timestamp range of compacted blocks.
    +	MaxBlockDuration model.Duration
    +
    +	// Duration for how long to retain data.
    +	Retention model.Duration
    +
    +	// Disable creation and consideration of lockfile.
    +	NoLockfile bool
    +}
    +
    +// Open returns a new storage backed by a TSDB database that is configured for Prometheus.
    +func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) {
    +	if opts.MinBlockDuration > opts.MaxBlockDuration {
    +		opts.MaxBlockDuration = opts.MinBlockDuration
    +	}
    +	// Start with smallest block duration and create exponential buckets until they exceed the
    +	// configured maximum block duration.
    +	rngs := tsdb.ExponentialBlockRanges(int64(time.Duration(opts.MinBlockDuration).Seconds()*1000), 10, 3)
    +
    +	for i, v := range rngs {
    +		if v > int64(time.Duration(opts.MaxBlockDuration).Seconds()*1000) {
    +			rngs = rngs[:i]
    +			break
    +		}
    +	}
    +
    +	db, err := tsdb.Open(path, l, r, &tsdb.Options{
    +		WALFlushInterval:  10 * time.Second,
    +		RetentionDuration: uint64(time.Duration(opts.Retention).Seconds() * 1000),
    +		BlockRanges:       rngs,
    +		NoLockfile:        opts.NoLockfile,
    +	})
    +	if err != nil {
    +		return nil, err
    +	}
    +	return db, nil
    +}
    +
    +// StartTime implements the Storage interface.
    +func (a adapter) StartTime() (int64, error) {
    +	var startTime int64
    +
    +	if len(a.db.Blocks()) > 0 {
    +		startTime = a.db.Blocks()[0].Meta().MinTime
    +	} else {
    +		startTime = time.Now().Unix() * 1000
    +	}
    +
    +	// Add a safety margin as it may take a few minutes for everything to spin up.
    +	return startTime + a.startTimeMargin, nil
    +}
    +
    +func (a adapter) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
    +	q, err := a.db.Querier(mint, maxt)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return querier{q: q}, nil
    +}
    +
    +// Appender returns a new appender against the storage.
    +func (a adapter) Appender() (storage.Appender, error) {
    +	return appender{a: a.db.Appender()}, nil
    +}
    +
    +// Close closes the storage and all its underlying resources.
    +func (a adapter) Close() error {
    +	return a.db.Close()
    +}
    +
    +type querier struct {
    +	q tsdb.Querier
    +}
    +
    +func (q querier) Select(_ *storage.SelectParams, oms ...*labels.Matcher) (storage.SeriesSet, error) {
    +	ms := make([]tsdbLabels.Matcher, 0, len(oms))
    +
    +	for _, om := range oms {
    +		ms = append(ms, convertMatcher(om))
    +	}
    +	set, err := q.q.Select(ms...)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return seriesSet{set: set}, nil
    +}
    +
    +func (q querier) LabelValues(name string) ([]string, error) { return q.q.LabelValues(name) }
    +func (q querier) Close() error                              { return q.q.Close() }
    +
    +type seriesSet struct {
    +	set tsdb.SeriesSet
    +}
    +
    +func (s seriesSet) Next() bool         { return s.set.Next() }
    +func (s seriesSet) Err() error         { return s.set.Err() }
    +func (s seriesSet) At() storage.Series { return series{s: s.set.At()} }
    +
    +type series struct {
    +	s tsdb.Series
    +}
    +
    +func (s series) Labels() labels.Labels            { return toLabels(s.s.Labels()) }
    +func (s series) Iterator() storage.SeriesIterator { return storage.SeriesIterator(s.s.Iterator()) }
    +
    +type appender struct {
    +	a tsdb.Appender
    +}
    +
    +func (a appender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
    +	ref, err := a.a.Add(toTSDBLabels(lset), t, v)
    +
    +	switch errors.Cause(err) {
    +	case tsdb.ErrNotFound:
    +		return 0, storage.ErrNotFound
    +	case tsdb.ErrOutOfOrderSample:
    +		return 0, storage.ErrOutOfOrderSample
    +	case tsdb.ErrAmendSample:
    +		return 0, storage.ErrDuplicateSampleForTimestamp
    +	case tsdb.ErrOutOfBounds:
    +		return 0, storage.ErrOutOfBounds
    +	}
    +	return ref, err
    +}
    +
    +func (a appender) AddFast(_ labels.Labels, ref uint64, t int64, v float64) error {
    +	err := a.a.AddFast(ref, t, v)
    +
    +	switch errors.Cause(err) {
    +	case tsdb.ErrNotFound:
    +		return storage.ErrNotFound
    +	case tsdb.ErrOutOfOrderSample:
    +		return storage.ErrOutOfOrderSample
    +	case tsdb.ErrAmendSample:
    +		return storage.ErrDuplicateSampleForTimestamp
    +	case tsdb.ErrOutOfBounds:
    +		return storage.ErrOutOfBounds
    +	}
    +	return err
    +}
    +
    +func (a appender) Commit() error   { return a.a.Commit() }
    +func (a appender) Rollback() error { return a.a.Rollback() }
    +
    +func convertMatcher(m *labels.Matcher) tsdbLabels.Matcher {
    +	switch m.Type {
    +	case labels.MatchEqual:
    +		return tsdbLabels.NewEqualMatcher(m.Name, m.Value)
    +
    +	case labels.MatchNotEqual:
    +		return tsdbLabels.Not(tsdbLabels.NewEqualMatcher(m.Name, m.Value))
    +
    +	case labels.MatchRegexp:
    +		res, err := tsdbLabels.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$")
    +		if err != nil {
    +			panic(err)
    +		}
    +		return res
    +
    +	case labels.MatchNotRegexp:
    +		res, err := tsdbLabels.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$")
    +		if err != nil {
    +			panic(err)
    +		}
    +		return tsdbLabels.Not(res)
    +	}
    +	panic("storage.convertMatcher: invalid matcher type")
    +}
    +
    +func toTSDBLabels(l labels.Labels) tsdbLabels.Labels {
    +	return *(*tsdbLabels.Labels)(unsafe.Pointer(&l))
    +}
    +
    +func toLabels(l tsdbLabels.Labels) labels.Labels {
    +	return *(*labels.Labels)(unsafe.Pointer(&l))
    +}
    diff --git a/src/prometheus/template/template.go b/src/prometheus/template/template.go
    new file mode 100644
    index 0000000..91ff9d5
    --- /dev/null
    +++ b/src/prometheus/template/template.go
    @@ -0,0 +1,317 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package template
    +
    +import (
    +	"bytes"
    +	"context"
    +	"errors"
    +	"fmt"
    +	"math"
    +	"net/url"
    +	"regexp"
    +	"sort"
    +	"strings"
    +	"time"
    +
    +	html_template "html/template"
    +	text_template "text/template"
    +
    +	"github.com/prometheus/common/model"
    +
    +	"github.com/prometheus/prometheus/promql"
    +	"github.com/prometheus/prometheus/util/strutil"
    +)
    +
    +// A version of vector that's easier to use from templates.
    +type sample struct {
    +	Labels map[string]string
    +	Value  float64
    +}
    +type queryResult []*sample
    +
    +type queryResultByLabelSorter struct {
    +	results queryResult
    +	by      string
    +}
    +
    +func (q queryResultByLabelSorter) Len() int {
    +	return len(q.results)
    +}
    +
    +func (q queryResultByLabelSorter) Less(i, j int) bool {
    +	return q.results[i].Labels[q.by] < q.results[j].Labels[q.by]
    +}
    +
    +func (q queryResultByLabelSorter) Swap(i, j int) {
    +	q.results[i], q.results[j] = q.results[j], q.results[i]
    +}
    +
    +// QueryFunc executes a PromQL query at the given time.
    +type QueryFunc func(context.Context, string, time.Time) (promql.Vector, error)
    +
    +func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (queryResult, error) {
    +	vector, err := queryFn(ctx, q, ts)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	// promql.Vector is hard to work with in templates, so convert to
    +	// base data types.
    +	// TODO(fabxc): probably not true anymore after type rework.
    +	var result = make(queryResult, len(vector))
    +	for n, v := range vector {
    +		s := sample{
    +			Value:  v.V,
    +			Labels: v.Metric.Map(),
    +		}
    +		result[n] = &s
    +	}
    +	return result, nil
    +}
    +
    +// Expander executes templates in text or HTML mode with a common set of Prometheus template functions.
    +type Expander struct {
    +	text    string
    +	name    string
    +	data    interface{}
    +	funcMap text_template.FuncMap
    +}
    +
    +// NewTemplateExpander returns a template expander ready to use.
    +func NewTemplateExpander(
    +	ctx context.Context,
    +	text string,
    +	name string,
    +	data interface{},
    +	timestamp model.Time,
    +	queryFunc QueryFunc,
    +	externalURL *url.URL,
    +) *Expander {
    +	return &Expander{
    +		text: text,
    +		name: name,
    +		data: data,
    +		funcMap: text_template.FuncMap{
    +			"query": func(q string) (queryResult, error) {
    +				return query(ctx, q, timestamp.Time(), queryFunc)
    +			},
    +			"first": func(v queryResult) (*sample, error) {
    +				if len(v) > 0 {
    +					return v[0], nil
    +				}
    +				return nil, errors.New("first() called on vector with no elements")
    +			},
    +			"label": func(label string, s *sample) string {
    +				return s.Labels[label]
    +			},
    +			"value": func(s *sample) float64 {
    +				return s.Value
    +			},
    +			"strvalue": func(s *sample) string {
    +				return s.Labels["__value__"]
    +			},
    +			"args": func(args ...interface{}) map[string]interface{} {
    +				result := make(map[string]interface{})
    +				for i, a := range args {
    +					result[fmt.Sprintf("arg%d", i)] = a
    +				}
    +				return result
    +			},
    +			"reReplaceAll": func(pattern, repl, text string) string {
    +				re := regexp.MustCompile(pattern)
    +				return re.ReplaceAllString(text, repl)
    +			},
    +			"safeHtml": func(text string) html_template.HTML {
    +				return html_template.HTML(text)
    +			},
    +			"match":     regexp.MatchString,
    +			"title":     strings.Title,
    +			"toUpper":   strings.ToUpper,
    +			"toLower":   strings.ToLower,
    +			"graphLink": strutil.GraphLinkForExpression,
    +			"tableLink": strutil.TableLinkForExpression,
    +			"sortByLabel": func(label string, v queryResult) queryResult {
    +				sorter := queryResultByLabelSorter{v[:], label}
    +				sort.Stable(sorter)
    +				return v
    +			},
    +			"humanize": func(v float64) string {
    +				if v == 0 || math.IsNaN(v) || math.IsInf(v, 0) {
    +					return fmt.Sprintf("%.4g", v)
    +				}
    +				if math.Abs(v) >= 1 {
    +					prefix := ""
    +					for _, p := range []string{"k", "M", "G", "T", "P", "E", "Z", "Y"} {
    +						if math.Abs(v) < 1000 {
    +							break
    +						}
    +						prefix = p
    +						v /= 1000
    +					}
    +					return fmt.Sprintf("%.4g%s", v, prefix)
    +				}
    +				prefix := ""
    +				for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} {
    +					if math.Abs(v) >= 1 {
    +						break
    +					}
    +					prefix = p
    +					v *= 1000
    +				}
    +				return fmt.Sprintf("%.4g%s", v, prefix)
    +			},
    +			"humanize1024": func(v float64) string {
    +				if math.Abs(v) <= 1 || math.IsNaN(v) || math.IsInf(v, 0) {
    +					return fmt.Sprintf("%.4g", v)
    +				}
    +				prefix := ""
    +				for _, p := range []string{"ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"} {
    +					if math.Abs(v) < 1024 {
    +						break
    +					}
    +					prefix = p
    +					v /= 1024
    +				}
    +				return fmt.Sprintf("%.4g%s", v, prefix)
    +			},
    +			"humanizeDuration": func(v float64) string {
    +				if math.IsNaN(v) || math.IsInf(v, 0) {
    +					return fmt.Sprintf("%.4g", v)
    +				}
    +				if v == 0 {
    +					return fmt.Sprintf("%.4gs", v)
    +				}
    +				if math.Abs(v) >= 1 {
    +					sign := ""
    +					if v < 0 {
    +						sign = "-"
    +						v = -v
    +					}
    +					seconds := int64(v) % 60
    +					minutes := (int64(v) / 60) % 60
    +					hours := (int64(v) / 60 / 60) % 24
    +					days := (int64(v) / 60 / 60 / 24)
    +					// For days to minutes, we display seconds as an integer.
    +					if days != 0 {
    +						return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds)
    +					}
    +					if hours != 0 {
    +						return fmt.Sprintf("%s%dh %dm %ds", sign, hours, minutes, seconds)
    +					}
    +					if minutes != 0 {
    +						return fmt.Sprintf("%s%dm %ds", sign, minutes, seconds)
    +					}
    +					// For seconds, we display 4 significant digits.
    +					return fmt.Sprintf("%s%.4gs", sign, v)
    +				}
    +				prefix := ""
    +				for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} {
    +					if math.Abs(v) >= 1 {
    +						break
    +					}
    +					prefix = p
    +					v *= 1000
    +				}
    +				return fmt.Sprintf("%.4g%ss", v, prefix)
    +			},
    +			"humanizeTimestamp": func(v float64) string {
    +				if math.IsNaN(v) || math.IsInf(v, 0) {
    +					return fmt.Sprintf("%.4g", v)
    +				}
    +				t := model.TimeFromUnixNano(int64(v * 1e9)).Time().UTC()
    +				return fmt.Sprint(t)
    +			},
    +			"pathPrefix": func() string {
    +				return externalURL.Path
    +			},
    +			"externalURL": func() string {
    +				return externalURL.String()
    +			},
    +		},
    +	}
    +}
    +
    +// Funcs adds the functions in fm to the Expander's function map.
    +// Existing functions will be overwritten in case of conflict.
    +func (te Expander) Funcs(fm text_template.FuncMap) {
    +	for k, v := range fm {
    +		te.funcMap[k] = v
    +	}
    +}
    +
    +// Expand expands a template in text (non-HTML) mode.
    +func (te Expander) Expand() (result string, resultErr error) {
    +	// It'd be better to have no alert description than to kill the whole process
    +	// if there's a bug in the template.
    +	defer func() {
    +		if r := recover(); r != nil {
    +			var ok bool
    +			resultErr, ok = r.(error)
    +			if !ok {
    +				resultErr = fmt.Errorf("panic expanding template %v: %v", te.name, r)
    +			}
    +		}
    +	}()
    +
    +	tmpl, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
    +	if err != nil {
    +		return "", fmt.Errorf("error parsing template %v: %v", te.name, err)
    +	}
    +	var buffer bytes.Buffer
    +	err = tmpl.Execute(&buffer, te.data)
    +	if err != nil {
    +		return "", fmt.Errorf("error executing template %v: %v", te.name, err)
    +	}
    +	return buffer.String(), nil
    +}
    +
    +// ExpandHTML expands a template with HTML escaping, with templates read from the given files.
    +func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr error) {
    +	defer func() {
    +		if r := recover(); r != nil {
    +			var ok bool
    +			resultErr, ok = r.(error)
    +			if !ok {
    +				resultErr = fmt.Errorf("panic expanding template %v: %v", te.name, r)
    +			}
    +		}
    +	}()
    +
    +	tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
    +	tmpl.Option("missingkey=zero")
    +	tmpl.Funcs(html_template.FuncMap{
    +		"tmpl": func(name string, data interface{}) (html_template.HTML, error) {
    +			var buffer bytes.Buffer
    +			err := tmpl.ExecuteTemplate(&buffer, name, data)
    +			return html_template.HTML(buffer.String()), err
    +		},
    +	})
    +	tmpl, err := tmpl.Parse(te.text)
    +	if err != nil {
    +		return "", fmt.Errorf("error parsing template %v: %v", te.name, err)
    +	}
    +	if len(templateFiles) > 0 {
    +		_, err = tmpl.ParseFiles(templateFiles...)
    +		if err != nil {
    +			return "", fmt.Errorf("error parsing template files for %v: %v", te.name, err)
    +		}
    +	}
    +	var buffer bytes.Buffer
    +	err = tmpl.Execute(&buffer, te.data)
    +	if err != nil {
    +		return "", fmt.Errorf("error executing template %v: %v", te.name, err)
    +	}
    +	return buffer.String(), nil
    +}
    diff --git a/src/prometheus/template/template_test.go b/src/prometheus/template/template_test.go
    new file mode 100644
    index 0000000..d7ce8c4
    --- /dev/null
    +++ b/src/prometheus/template/template_test.go
    @@ -0,0 +1,283 @@
    +// Copyright 2014 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package template
    +
    +import (
    +	"context"
    +	"math"
    +	"net/url"
    +	"testing"
    +	"time"
    +
    +	"github.com/prometheus/prometheus/pkg/labels"
    +	"github.com/prometheus/prometheus/promql"
    +	"github.com/prometheus/prometheus/util/testutil"
    +)
    +
    +type testTemplatesScenario struct {
    +	text        string
    +	output      string
    +	input       interface{}
    +	queryResult promql.Vector
    +	shouldFail  bool
    +	html        bool
    +	errorMsg    string
    +}
    +
    +func TestTemplateExpansion(t *testing.T) {
    +	scenarios := []testTemplatesScenario{
    +		{
    +			// No template.
    +			text:   "plain text",
    +			output: "plain text",
    +		},
    +		{
    +			// Simple value.
    +			text:   "{{ 1 }}",
    +			output: "1",
    +		},
    +		{
    +			// Non-ASCII space (not allowed in text/template, see https://github.com/golang/go/blob/master/src/text/template/parse/lex.go#L98)
    +			text:       "{{ }}",
    +			shouldFail: true,
    +			errorMsg:   "error parsing template test: template: test:1: unexpected unrecognized character in action: U+00A0 in command",
    +		},
    +		{
    +			// HTML escaping.
    +			text:   "{{ \"\" }}",
    +			output: "<b>",
    +			html:   true,
    +		},
    +		{
    +			// Disabling HTML escaping.
    +			text:   "{{ \"\" | safeHtml }}",
    +			output: "",
    +			html:   true,
    +		},
    +		{
    +			// HTML escaping doesn't apply to non-html.
    +			text:   "{{ \"\" }}",
    +			output: "",
    +		},
    +		{
    +			// Pass multiple arguments to templates.
    +			text:   "{{define \"x\"}}{{.arg0}} {{.arg1}}{{end}}{{template \"x\" (args 1 \"2\")}}",
    +			output: "1 2",
    +		},
    +		{
    +			text:        "{{ query \"1.5\" | first | value }}",
    +			output:      "1.5",
    +			queryResult: promql.Vector{{Point: promql.Point{T: 0, V: 1.5}}},
    +		},
    +		{
    +			// Get value from query.
    +			text: "{{ query \"metric{instance='a'}\" | first | value }}",
    +			queryResult: promql.Vector{
    +				{
    +					Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
    +					Point:  promql.Point{T: 0, V: 11},
    +				}},
    +			output: "11",
    +		},
    +		{
    +			// Get label from query.
    +			text: "{{ query \"metric{instance='a'}\" | first | label \"instance\" }}",
    +
    +			queryResult: promql.Vector{
    +				{
    +					Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
    +					Point:  promql.Point{T: 0, V: 11},
    +				}},
    +			output: "a",
    +		},
    +		{
    +			// Missing label is empty when using label function.
    +			text: "{{ query \"metric{instance='a'}\" | first | label \"foo\" }}",
    +			queryResult: promql.Vector{
    +				{
    +					Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
    +					Point:  promql.Point{T: 0, V: 11},
    +				}},
    +			output: "",
    +		},
    +		{
    +			// Missing label is empty when not using label function.
    +			text: "{{ $x := query \"metric\" | first }}{{ $x.Labels.foo }}",
    +			queryResult: promql.Vector{
    +				{
    +					Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
    +					Point:  promql.Point{T: 0, V: 11},
    +				}},
    +			output: "",
    +		},
    +		{
    +			text: "{{ $x := query \"metric\" | first }}{{ $x.Labels.foo }}",
    +			queryResult: promql.Vector{
    +				{
    +					Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
    +					Point:  promql.Point{T: 0, V: 11},
    +				}},
    +			output: "",
    +			html:   true,
    +		},
    +		{
    +			// Range over query and sort by label.
    +			text: "{{ range query \"metric\" | sortByLabel \"instance\" }}{{.Labels.instance}}:{{.Value}}: {{end}}",
    +			queryResult: promql.Vector{
    +				{
    +					Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
    +					Point:  promql.Point{T: 0, V: 11},
    +				}, {
    +					Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "b"),
    +					Point:  promql.Point{T: 0, V: 21},
    +				}},
    +			output: "a:11: b:21: ",
    +		},
    +		{
    +			// Unparsable template.
    +			text:       "{{",
    +			shouldFail: true,
    +			errorMsg:   "error parsing template test: template: test:1: unexpected unclosed action in command",
    +		},
    +		{
    +			// Error in function.
    +			text:        "{{ query \"missing\" | first }}",
    +			queryResult: promql.Vector{},
    +			shouldFail:  true,
    +			errorMsg:    "error executing template test: template: test:1:21: executing \"test\" at : error calling first: first() called on vector with no elements",
    +		},
    +		{
    +			// Panic.
    +			text:        "{{ (query \"missing\").banana }}",
    +			queryResult: promql.Vector{},
    +			shouldFail:  true,
    +			errorMsg:    "error executing template test: template: test:1:10: executing \"test\" at <\"missing\">: can't evaluate field banana in type template.queryResult",
    +		},
    +		{
    +			// Regex replacement.
    +			text:   "{{ reReplaceAll \"(a)b\" \"x$1\" \"ab\" }}",
    +			output: "xa",
    +		},
    +		{
    +			// Humanize.
    +			text:   "{{ range . }}{{ humanize . }}:{{ end }}",
    +			input:  []float64{0.0, 1.0, 1234567.0, .12},
    +			output: "0:1:1.235M:120m:",
    +		},
    +		{
    +			// Humanize1024.
    +			text:   "{{ range . }}{{ humanize1024 . }}:{{ end }}",
    +			input:  []float64{0.0, 1.0, 1048576.0, .12},
    +			output: "0:1:1Mi:0.12:",
    +		},
    +		{
    +			// HumanizeDuration - seconds.
    +			text:   "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
    +			input:  []float64{0, 1, 60, 3600, 86400, 86400 + 3600, -(86400*2 + 3600*3 + 60*4 + 5), 899.99},
    +			output: "0s:1s:1m 0s:1h 0m 0s:1d 0h 0m 0s:1d 1h 0m 0s:-2d 3h 4m 5s:14m 59s:",
    +		},
    +		{
    +			// HumanizeDuration - subsecond and fractional seconds.
    +			text:   "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
    +			input:  []float64{.1, .0001, .12345, 60.1, 60.5, 1.2345, 12.345},
    +			output: "100ms:100us:123.5ms:1m 0s:1m 0s:1.234s:12.35s:",
    +		},
    +		{
    +			// Humanize* Inf and NaN.
    +			text:   "{{ range . }}{{ humanize . }}:{{ humanize1024 . }}:{{ humanizeDuration . }}:{{humanizeTimestamp .}}:{{ end }}",
    +			input:  []float64{math.Inf(1), math.Inf(-1), math.NaN()},
    +			output: "+Inf:+Inf:+Inf:+Inf:-Inf:-Inf:-Inf:-Inf:NaN:NaN:NaN:NaN:",
    +		},
    +		{
    +			// HumanizeTimestamp - model.SampleValue input.
    +			text:   "{{ 1435065584.128 | humanizeTimestamp }}",
    +			output: "2015-06-23 13:19:44.128 +0000 UTC",
    +		},
    +		{
    +			// Title.
    +			text:   "{{ \"aa bb CC\" | title }}",
    +			output: "Aa Bb CC",
    +		},
    +		{
    +			// toUpper.
    +			text:   "{{ \"aa bb CC\" | toUpper }}",
    +			output: "AA BB CC",
    +		},
    +		{
    +			// toLower.
    +			text:   "{{ \"aA bB CC\" | toLower }}",
    +			output: "aa bb cc",
    +		},
    +		{
    +			// Match.
    +			text:   "{{ match \"a+\" \"aa\" }} {{ match \"a+\" \"b\" }}",
    +			output: "true false",
    +		},
    +		{
    +			// graphLink.
    +			text:   "{{ graphLink \"up\" }}",
    +			output: "/graph?g0.expr=up&g0.tab=0",
    +		},
    +		{
    +			// tableLink.
    +			text:   "{{ tableLink \"up\" }}",
    +			output: "/graph?g0.expr=up&g0.tab=1",
    +		},
    +		{
    +			// tmpl.
    +			text:   "{{ define \"a\" }}x{{ end }}{{ $name := \"a\"}}{{ tmpl $name . }}",
    +			output: "x",
    +			html:   true,
    +		},
    +		{
    +			// pathPrefix.
    +			text:   "{{ pathPrefix }}",
    +			output: "/path/prefix",
    +		},
    +		{
    +			// externalURL.
    +			text:   "{{ externalURL }}",
    +			output: "http://testhost:9090/path/prefix",
    +		},
    +	}
    +
    +	extURL, err := url.Parse("http://testhost:9090/path/prefix")
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	for _, s := range scenarios {
    +		queryFunc := func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) {
    +			return s.queryResult, nil
    +		}
    +		var result string
    +		var err error
    +		expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, 0, queryFunc, extURL)
    +		if s.html {
    +			result, err = expander.ExpandHTML(nil)
    +		} else {
    +			result, err = expander.Expand()
    +		}
    +		if s.shouldFail {
    +			testutil.NotOk(t, err, "%v", s.text)
    +			continue
    +		}
    +
    +		testutil.Ok(t, err)
    +
    +		if err == nil {
    +			testutil.Equals(t, result, s.output)
    +		}
    +	}
    +}
    diff --git a/src/prometheus/util/httputil/compression.go b/src/prometheus/util/httputil/compression.go
    new file mode 100644
    index 0000000..b96c088
    --- /dev/null
    +++ b/src/prometheus/util/httputil/compression.go
    @@ -0,0 +1,92 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package httputil
    +
    +import (
    +	"compress/gzip"
    +	"compress/zlib"
    +	"io"
    +	"net/http"
    +	"strings"
    +)
    +
    +const (
    +	acceptEncodingHeader  = "Accept-Encoding"
    +	contentEncodingHeader = "Content-Encoding"
    +	gzipEncoding          = "gzip"
    +	deflateEncoding       = "deflate"
    +)
    +
    +// Wrapper around http.Handler which adds suitable response compression based
    +// on the client's Accept-Encoding headers.
    +type compressedResponseWriter struct {
    +	http.ResponseWriter
    +	writer io.Writer
    +}
    +
    +// Writes HTTP response content data.
    +func (c *compressedResponseWriter) Write(p []byte) (int, error) {
    +	return c.writer.Write(p)
    +}
    +
    +// Closes the compressedResponseWriter and ensures to flush all data before.
    +func (c *compressedResponseWriter) Close() {
    +	if zlibWriter, ok := c.writer.(*zlib.Writer); ok {
    +		zlibWriter.Flush()
    +	}
    +	if gzipWriter, ok := c.writer.(*gzip.Writer); ok {
    +		gzipWriter.Flush()
    +	}
    +	if closer, ok := c.writer.(io.Closer); ok {
    +		defer closer.Close()
    +	}
    +}
    +
    +// Constructs a new compressedResponseWriter based on client request headers.
    +func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter {
    +	encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",")
    +	for _, encoding := range encodings {
    +		switch strings.TrimSpace(encoding) {
    +		case gzipEncoding:
    +			writer.Header().Set(contentEncodingHeader, gzipEncoding)
    +			return &compressedResponseWriter{
    +				ResponseWriter: writer,
    +				writer:         gzip.NewWriter(writer),
    +			}
    +		case deflateEncoding:
    +			writer.Header().Set(contentEncodingHeader, deflateEncoding)
    +			return &compressedResponseWriter{
    +				ResponseWriter: writer,
    +				writer:         zlib.NewWriter(writer),
    +			}
    +		}
    +	}
    +	return &compressedResponseWriter{
    +		ResponseWriter: writer,
    +		writer:         writer,
    +	}
    +}
    +
    +// CompressionHandler is a wrapper around http.Handler which adds suitable
    +// response compression based on the client's Accept-Encoding headers.
    +type CompressionHandler struct {
    +	Handler http.Handler
    +}
    +
    +// ServeHTTP adds compression to the original http.Handler's ServeHTTP() method.
    +func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
    +	compWriter := newCompressedResponseWriter(writer, req)
    +	c.Handler.ServeHTTP(compWriter, req)
    +	compWriter.Close()
    +}
    diff --git a/src/prometheus/util/promlint/promlint.go b/src/prometheus/util/promlint/promlint.go
    new file mode 100644
    index 0000000..8ac1ae8
    --- /dev/null
    +++ b/src/prometheus/util/promlint/promlint.go
    @@ -0,0 +1,268 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +// Package promlint provides a linter for Prometheus metrics.
    +package promlint
    +
    +import (
    +	"fmt"
    +	"io"
    +	"sort"
    +	"strings"
    +
    +	dto "github.com/prometheus/client_model/go"
    +	"github.com/prometheus/common/expfmt"
    +)
    +
+// A Linter is a Prometheus metrics linter.  It identifies issues with metric
+// names, types, and metadata, and reports them to the caller.
+type Linter struct {
+	// r is the stream of text exposition format metrics to lint.
+	r io.Reader
+}
    +
+// A Problem is an issue detected by a Linter.
+type Problem struct {
+	// The name of the metric indicated by this Problem.
+	Metric string
+
+	// A description of the issue for this Problem.
+	Text string
+}
+
+// problems is a slice of Problems with a helper method to easily append
+// additional Problems to the slice.  The zero (nil) value is ready to use.
+type problems []Problem
    +
    +// Add appends a new Problem to the slice for the specified metric, with
    +// the specified issue text.
    +func (p *problems) Add(mf dto.MetricFamily, text string) {
    +	*p = append(*p, Problem{
    +		Metric: mf.GetName(),
    +		Text:   text,
    +	})
    +}
    +
    +// New creates a new Linter that reads an input stream of Prometheus metrics.
    +// Only the text exposition format is supported.
    +func New(r io.Reader) *Linter {
    +	return &Linter{
    +		r: r,
    +	}
    +}
    +
    +// Lint performs a linting pass, returning a slice of Problems indicating any
    +// issues found in the metrics stream.  The slice is sorted by metric name
    +// and issue description.
    +func (l *Linter) Lint() ([]Problem, error) {
    +	// TODO(mdlayher): support for protobuf exposition format?
    +	d := expfmt.NewDecoder(l.r, expfmt.FmtText)
    +
    +	var problems []Problem
    +
    +	var mf dto.MetricFamily
    +	for {
    +		if err := d.Decode(&mf); err != nil {
    +			if err == io.EOF {
    +				break
    +			}
    +
    +			return nil, err
    +		}
    +
    +		problems = append(problems, lint(mf)...)
    +	}
    +
    +	// Ensure deterministic output.
    +	sort.SliceStable(problems, func(i, j int) bool {
    +		if problems[i].Metric < problems[j].Metric {
    +			return true
    +		}
    +
    +		return problems[i].Text < problems[j].Text
    +	})
    +
    +	return problems, nil
    +}
    +
    +// lint is the entry point for linting a single metric.
    +func lint(mf dto.MetricFamily) []Problem {
    +	fns := []func(mf dto.MetricFamily) []Problem{
    +		lintHelp,
    +		lintMetricUnits,
    +		lintCounter,
    +		lintHistogramSummaryReserved,
    +	}
    +
    +	var problems []Problem
    +	for _, fn := range fns {
    +		problems = append(problems, fn(mf)...)
    +	}
    +
    +	// TODO(mdlayher): lint rules for specific metrics types.
    +	return problems
    +}
    +
    +// lintHelp detects issues related to the help text for a metric.
    +func lintHelp(mf dto.MetricFamily) []Problem {
    +	var problems problems
    +
    +	// Expect all metrics to have help text available.
    +	if mf.Help == nil {
    +		problems.Add(mf, "no help text")
    +	}
    +
    +	return problems
    +}
    +
    +// lintMetricUnits detects issues with metric unit names.
    +func lintMetricUnits(mf dto.MetricFamily) []Problem {
    +	var problems problems
    +
    +	unit, base, ok := metricUnits(*mf.Name)
    +	if !ok {
    +		// No known units detected.
    +		return nil
    +	}
    +
    +	// Unit is already a base unit.
    +	if unit == base {
    +		return nil
    +	}
    +
    +	problems.Add(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))
    +
    +	return problems
    +}
    +
    +// lintCounter detects issues specific to counters, as well as patterns that should
    +// only be used with counters.
    +func lintCounter(mf dto.MetricFamily) []Problem {
    +	var problems problems
    +
    +	isCounter := mf.GetType() == dto.MetricType_COUNTER
    +	isUntyped := mf.GetType() == dto.MetricType_UNTYPED
    +	hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total")
    +
    +	switch {
    +	case isCounter && !hasTotalSuffix:
    +		problems.Add(mf, `counter metrics should have "_total" suffix`)
    +	case !isUntyped && !isCounter && hasTotalSuffix:
    +		problems.Add(mf, `non-counter metrics should not have "_total" suffix`)
    +	}
    +
    +	return problems
    +}
    +
    +// lintHistogramSummaryReserved detects when other types of metrics use names or labels
    +// reserved for use by histograms and/or summaries.
    +func lintHistogramSummaryReserved(mf dto.MetricFamily) []Problem {
    +	// These rules do not apply to untyped metrics.
    +	t := mf.GetType()
    +	if t == dto.MetricType_UNTYPED {
    +		return nil
    +	}
    +
    +	var problems problems
    +
    +	isHistogram := t == dto.MetricType_HISTOGRAM
    +	isSummary := t == dto.MetricType_SUMMARY
    +
    +	n := mf.GetName()
    +
    +	if !isHistogram && strings.HasSuffix(n, "_bucket") {
    +		problems.Add(mf, `non-histogram metrics should not have "_bucket" suffix`)
    +	}
    +	if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") {
    +		problems.Add(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)
    +	}
    +	if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") {
    +		problems.Add(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)
    +	}
    +
    +	for _, m := range mf.GetMetric() {
    +		for _, l := range m.GetLabel() {
    +			ln := l.GetName()
    +
    +			if !isHistogram && ln == "le" {
    +				problems.Add(mf, `non-histogram metrics should not have "le" label`)
    +			}
    +			if !isSummary && ln == "quantile" {
    +				problems.Add(mf, `non-summary metrics should not have "quantile" label`)
    +			}
    +		}
    +	}
    +
    +	return problems
    +}
    +
    +// metricUnits attempts to detect known unit types used as part of a metric name,
    +// e.g. "foo_bytes_total" or "bar_baz_milligrams".
    +func metricUnits(m string) (unit string, base string, ok bool) {
    +	ss := strings.Split(m, "_")
    +
    +	for _, u := range baseUnits {
    +		// Also check for "no prefix".
    +		for _, p := range append(unitPrefixes, "") {
    +			for _, s := range ss {
    +				// Attempt to explicitly match a known unit with a known prefix,
    +				// as some words may look like "units" when matching suffix.
    +				//
    +				// As an example, "thermometers" should not match "meters", but
    +				// "kilometers" should.
    +				if s == p+u {
    +					return p + u, u, true
    +				}
    +			}
    +		}
    +	}
    +
    +	return "", "", false
    +}
    +
    +// Units and their possible prefixes recognized by this library.  More can be
    +// added over time as needed.
    +var (
    +	baseUnits = []string{
    +		"amperes",
    +		"bytes",
    +		"candela",
    +		"grams",
    +		"kelvin", // Both plural and non-plural form allowed.
    +		"kelvins",
    +		"meters", // Both American and international spelling permitted.
    +		"metres",
    +		"moles",
    +		"seconds",
    +	}
    +
    +	unitPrefixes = []string{
    +		"pico",
    +		"nano",
    +		"micro",
    +		"milli",
    +		"centi",
    +		"deci",
    +		"deca",
    +		"hecto",
    +		"kilo",
    +		"kibi",
    +		"mega",
    +		"mibi",
    +		"giga",
    +		"gibi",
    +		"tera",
    +		"tebi",
    +		"peta",
    +		"pebi",
    +	}
    +)
    diff --git a/src/prometheus/util/promlint/promlint_test.go b/src/prometheus/util/promlint/promlint_test.go
    new file mode 100644
    index 0000000..dec3ed6
    --- /dev/null
    +++ b/src/prometheus/util/promlint/promlint_test.go
    @@ -0,0 +1,497 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package promlint_test
    +
    +import (
    +	"reflect"
    +	"strings"
    +	"testing"
    +
    +	"github.com/prometheus/prometheus/util/promlint"
    +)
    +
+// TestLintNoHelpText verifies that metric families with missing or empty
+// HELP text are each reported as a "no help text" problem, and that metrics
+// with help text produce no problems.
+func TestLintNoHelpText(t *testing.T) {
+	const msg = "no help text"
+
+	// Table-driven cases: text exposition input and the exact problems the
+	// linter is expected to report (nil means no problems).
+	tests := []struct {
+		name     string
+		in       string
+		problems []promlint.Problem
+	}{
+		{
+			name: "no help",
+			in: `
+# TYPE go_goroutines gauge
+go_goroutines 24
+`,
+			problems: []promlint.Problem{{
+				Metric: "go_goroutines",
+				Text:   msg,
+			}},
+		},
+		{
+			name: "empty help",
+			in: `
+# HELP go_goroutines
+# TYPE go_goroutines gauge
+go_goroutines 24
+`,
+			problems: []promlint.Problem{{
+				Metric: "go_goroutines",
+				Text:   msg,
+			}},
+		},
+		{
+			name: "no help and empty help",
+			in: `
+# HELP go_goroutines
+# TYPE go_goroutines gauge
+go_goroutines 24
+# TYPE go_threads gauge
+go_threads 10
+`,
+			problems: []promlint.Problem{
+				{
+					Metric: "go_goroutines",
+					Text:   msg,
+				},
+				{
+					Metric: "go_threads",
+					Text:   msg,
+				},
+			},
+		},
+		{
+			name: "OK",
+			in: `
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 24
+`,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			l := promlint.New(strings.NewReader(tt.in))
+
+			problems, err := l.Lint()
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			if want, got := tt.problems, problems; !reflect.DeepEqual(want, got) {
+				t.Fatalf("unexpected problems:\n- want: %v\n-  got: %v",
+					want, got)
+			}
+		})
+	}
+}
    +
+// TestLintMetricUnits verifies that, for every recognized base unit, a
+// prefixed unit name (e.g. "milliamperes") is flagged with a suggestion to
+// use the base unit, and that lookalike words ("thermometers") are not
+// falsely matched.
+func TestLintMetricUnits(t *testing.T) {
+	tests := []struct {
+		name     string
+		in       string
+		problems []promlint.Problem
+	}{
+		{
+			name: "amperes",
+			in: `
+# HELP x_milliamperes Test metric.
+# TYPE x_milliamperes untyped
+x_milliamperes 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_milliamperes",
+				Text:   `use base unit "amperes" instead of "milliamperes"`,
+			}},
+		},
+		{
+			name: "bytes",
+			in: `
+# HELP x_gigabytes Test metric.
+# TYPE x_gigabytes untyped
+x_gigabytes 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_gigabytes",
+				Text:   `use base unit "bytes" instead of "gigabytes"`,
+			}},
+		},
+		{
+			name: "candela",
+			in: `
+# HELP x_kilocandela Test metric.
+# TYPE x_kilocandela untyped
+x_kilocandela 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_kilocandela",
+				Text:   `use base unit "candela" instead of "kilocandela"`,
+			}},
+		},
+		{
+			name: "grams",
+			in: `
+# HELP x_kilograms Test metric.
+# TYPE x_kilograms untyped
+x_kilograms 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_kilograms",
+				Text:   `use base unit "grams" instead of "kilograms"`,
+			}},
+		},
+		{
+			name: "kelvin",
+			in: `
+# HELP x_nanokelvin Test metric.
+# TYPE x_nanokelvin untyped
+x_nanokelvin 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_nanokelvin",
+				Text:   `use base unit "kelvin" instead of "nanokelvin"`,
+			}},
+		},
+		{
+			name: "kelvins",
+			in: `
+# HELP x_nanokelvins Test metric.
+# TYPE x_nanokelvins untyped
+x_nanokelvins 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_nanokelvins",
+				Text:   `use base unit "kelvins" instead of "nanokelvins"`,
+			}},
+		},
+		{
+			name: "meters",
+			in: `
+# HELP x_kilometers Test metric.
+# TYPE x_kilometers untyped
+x_kilometers 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_kilometers",
+				Text:   `use base unit "meters" instead of "kilometers"`,
+			}},
+		},
+		{
+			name: "metres",
+			in: `
+# HELP x_kilometres Test metric.
+# TYPE x_kilometres untyped
+x_kilometres 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_kilometres",
+				Text:   `use base unit "metres" instead of "kilometres"`,
+			}},
+		},
+		{
+			name: "moles",
+			in: `
+# HELP x_picomoles Test metric.
+# TYPE x_picomoles untyped
+x_picomoles 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_picomoles",
+				Text:   `use base unit "moles" instead of "picomoles"`,
+			}},
+		},
+		{
+			name: "seconds",
+			in: `
+# HELP x_microseconds Test metric.
+# TYPE x_microseconds untyped
+x_microseconds 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_microseconds",
+				Text:   `use base unit "seconds" instead of "microseconds"`,
+			}},
+		},
+		{
+			name: "OK",
+			in: `
+# HELP thermometers_kelvin Test metric with name that looks like "meters".
+# TYPE thermometers_kelvin untyped
+thermometers_kelvin 0
+`,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			l := promlint.New(strings.NewReader(tt.in))
+
+			problems, err := l.Lint()
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			if want, got := tt.problems, problems; !reflect.DeepEqual(want, got) {
+				t.Fatalf("unexpected problems:\n- want: %v\n-  got: %v",
+					want, got)
+			}
+		})
+	}
+}
    +
+// TestLintCounter verifies the "_total" suffix rules: counters must carry
+// the suffix, non-counters must not, and untyped metrics are exempt either
+// way.
+func TestLintCounter(t *testing.T) {
+	tests := []struct {
+		name     string
+		in       string
+		problems []promlint.Problem
+	}{
+		{
+			name: "counter without _total suffix",
+			in: `
+# HELP x_bytes Test metric.
+# TYPE x_bytes counter
+x_bytes 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_bytes",
+				Text:   `counter metrics should have "_total" suffix`,
+			}},
+		},
+		{
+			name: "gauge with _total suffix",
+			in: `
+# HELP x_bytes_total Test metric.
+# TYPE x_bytes_total gauge
+x_bytes_total 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_bytes_total",
+				Text:   `non-counter metrics should not have "_total" suffix`,
+			}},
+		},
+		{
+			name: "counter with _total suffix",
+			in: `
+# HELP x_bytes_total Test metric.
+# TYPE x_bytes_total counter
+x_bytes_total 10
+`,
+		},
+		{
+			name: "gauge without _total suffix",
+			in: `
+# HELP x_bytes Test metric.
+# TYPE x_bytes gauge
+x_bytes 10
+`,
+		},
+		{
+			name: "untyped with _total suffix",
+			in: `
+# HELP x_bytes_total Test metric.
+# TYPE x_bytes_total untyped
+x_bytes_total 10
+`,
+		},
+		{
+			name: "untyped without _total suffix",
+			in: `
+# HELP x_bytes Test metric.
+# TYPE x_bytes untyped
+x_bytes 10
+`,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			l := promlint.New(strings.NewReader(tt.in))
+
+			problems, err := l.Lint()
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			if want, got := tt.problems, problems; !reflect.DeepEqual(want, got) {
+				t.Fatalf("unexpected problems:\n- want: %v\n-  got: %v",
+					want, got)
+			}
+		})
+	}
+}
    +
+// TestLintHistogramSummaryReserved verifies that suffixes ("_bucket",
+// "_count", "_sum") and labels ("le", "quantile") reserved for histograms
+// and summaries are flagged on other metric types, and that well-formed
+// histograms and summaries pass cleanly.
+func TestLintHistogramSummaryReserved(t *testing.T) {
+	tests := []struct {
+		name     string
+		in       string
+		problems []promlint.Problem
+	}{
+		{
+			name: "gauge with _bucket suffix",
+			in: `
+# HELP x_bytes_bucket Test metric.
+# TYPE x_bytes_bucket gauge
+x_bytes_bucket 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_bytes_bucket",
+				Text:   `non-histogram metrics should not have "_bucket" suffix`,
+			}},
+		},
+		{
+			name: "gauge with _count suffix",
+			in: `
+# HELP x_bytes_count Test metric.
+# TYPE x_bytes_count gauge
+x_bytes_count 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_bytes_count",
+				Text:   `non-histogram and non-summary metrics should not have "_count" suffix`,
+			}},
+		},
+		{
+			name: "gauge with _sum suffix",
+			in: `
+# HELP x_bytes_sum Test metric.
+# TYPE x_bytes_sum gauge
+x_bytes_sum 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_bytes_sum",
+				Text:   `non-histogram and non-summary metrics should not have "_sum" suffix`,
+			}},
+		},
+		{
+			name: "gauge with le label",
+			in: `
+# HELP x_bytes Test metric.
+# TYPE x_bytes gauge
+x_bytes{le="1"} 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_bytes",
+				Text:   `non-histogram metrics should not have "le" label`,
+			}},
+		},
+		{
+			name: "gauge with quantile label",
+			in: `
+# HELP x_bytes Test metric.
+# TYPE x_bytes gauge
+x_bytes{quantile="1"} 10
+`,
+			problems: []promlint.Problem{{
+				Metric: "x_bytes",
+				Text:   `non-summary metrics should not have "quantile" label`,
+			}},
+		},
+		{
+			name: "histogram with quantile label",
+			in: `
+# HELP tsdb_compaction_duration Duration of compaction runs.
+# TYPE tsdb_compaction_duration histogram
+tsdb_compaction_duration_bucket{le="0.005",quantile="0.01"} 0
+tsdb_compaction_duration_bucket{le="0.01",quantile="0.01"} 0
+tsdb_compaction_duration_bucket{le="0.025",quantile="0.01"} 0
+tsdb_compaction_duration_bucket{le="0.05",quantile="0.01"} 0
+tsdb_compaction_duration_bucket{le="0.1",quantile="0.01"} 0
+tsdb_compaction_duration_bucket{le="0.25",quantile="0.01"} 0
+tsdb_compaction_duration_bucket{le="0.5",quantile="0.01"} 57
+tsdb_compaction_duration_bucket{le="1",quantile="0.01"} 68
+tsdb_compaction_duration_bucket{le="2.5",quantile="0.01"} 69
+tsdb_compaction_duration_bucket{le="5",quantile="0.01"} 69
+tsdb_compaction_duration_bucket{le="10",quantile="0.01"} 69
+tsdb_compaction_duration_bucket{le="+Inf",quantile="0.01"} 69
+tsdb_compaction_duration_sum 28.740810936000006
+tsdb_compaction_duration_count 69
+`,
+			problems: []promlint.Problem{{
+				Metric: "tsdb_compaction_duration",
+				Text:   `non-summary metrics should not have "quantile" label`,
+			}},
+		},
+		{
+			name: "summary with le label",
+			in: `
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0",le="0.01"} 4.2365e-05
+go_gc_duration_seconds{quantile="0.25",le="0.01"} 8.1492e-05
+go_gc_duration_seconds{quantile="0.5",le="0.01"} 0.000100656
+go_gc_duration_seconds{quantile="0.75",le="0.01"} 0.000113913
+go_gc_duration_seconds{quantile="1",le="0.01"} 0.021754305
+go_gc_duration_seconds_sum 1.769429004
+go_gc_duration_seconds_count 5962
+`,
+			problems: []promlint.Problem{{
+				Metric: "go_gc_duration_seconds",
+				Text:   `non-histogram metrics should not have "le" label`,
+			}},
+		},
+		{
+			name: "histogram OK",
+			in: `
+# HELP tsdb_compaction_duration Duration of compaction runs.
+# TYPE tsdb_compaction_duration histogram
+tsdb_compaction_duration_bucket{le="0.005"} 0
+tsdb_compaction_duration_bucket{le="0.01"} 0
+tsdb_compaction_duration_bucket{le="0.025"} 0
+tsdb_compaction_duration_bucket{le="0.05"} 0
+tsdb_compaction_duration_bucket{le="0.1"} 0
+tsdb_compaction_duration_bucket{le="0.25"} 0
+tsdb_compaction_duration_bucket{le="0.5"} 57
+tsdb_compaction_duration_bucket{le="1"} 68
+tsdb_compaction_duration_bucket{le="2.5"} 69
+tsdb_compaction_duration_bucket{le="5"} 69
+tsdb_compaction_duration_bucket{le="10"} 69
+tsdb_compaction_duration_bucket{le="+Inf"} 69
+tsdb_compaction_duration_sum 28.740810936000006
+tsdb_compaction_duration_count 69
+`,
+		},
+		{
+			name: "summary OK",
+			in: `
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 4.2365e-05
+go_gc_duration_seconds{quantile="0.25"} 8.1492e-05
+go_gc_duration_seconds{quantile="0.5"} 0.000100656
+go_gc_duration_seconds{quantile="0.75"} 0.000113913
+go_gc_duration_seconds{quantile="1"} 0.021754305
+go_gc_duration_seconds_sum 1.769429004
+go_gc_duration_seconds_count 5962
+`,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			l := promlint.New(strings.NewReader(tt.in))
+
+			problems, err := l.Lint()
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			if want, got := tt.problems, problems; !reflect.DeepEqual(want, got) {
+				t.Fatalf("unexpected problems:\n- want: %v\n-  got: %v",
+					want, got)
+			}
+		})
+	}
+}
    diff --git a/src/prometheus/util/stats/query_stats.go b/src/prometheus/util/stats/query_stats.go
    new file mode 100644
    index 0000000..3fd593c
    --- /dev/null
    +++ b/src/prometheus/util/stats/query_stats.go
    @@ -0,0 +1,89 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package stats
    +
+// QueryTiming identifies the code area or functionality in which time is spent
+// during a query.
+type QueryTiming int
+
+// Query timings.  The display names for these values are defined by
+// QueryTiming.String.
+const (
+	EvalTotalTime        QueryTiming = iota // Eval total time.
+	ResultSortTime                          // Result sorting time.
+	QueryPreparationTime                    // Query preparation time.
+	InnerEvalTime                           // Inner eval time.
+	ExecQueueTime                           // Exec queue wait time.
+	ExecTotalTime                           // Exec total time.
+)
    +
    +// Return a string representation of a QueryTiming identifier.
    +func (s QueryTiming) String() string {
    +	switch s {
    +	case EvalTotalTime:
    +		return "Eval total time"
    +	case ResultSortTime:
    +		return "Result sorting time"
    +	case QueryPreparationTime:
    +		return "Query preparation time"
    +	case InnerEvalTime:
    +		return "Inner eval time"
    +	case ExecQueueTime:
    +		return "Exec queue wait time"
    +	case ExecTotalTime:
    +		return "Exec total time"
    +	default:
    +		return "Unknown query timing"
    +	}
    +}
    +
+// queryTimings with all query timers mapped to durations.
+// All values are durations in seconds, as produced by Timer.Duration.
+type queryTimings struct {
+	EvalTotalTime        float64 `json:"evalTotalTime"`
+	ResultSortTime       float64 `json:"resultSortTime"`
+	QueryPreparationTime float64 `json:"queryPreparationTime"`
+	InnerEvalTime        float64 `json:"innerEvalTime"`
+	ExecQueueTime        float64 `json:"execQueueTime"`
+	ExecTotalTime        float64 `json:"execTotalTime"`
+}
+
+// QueryStats currently only holding query timings.
+type QueryStats struct {
+	Timings queryTimings `json:"timings,omitempty"`
+}
    +
    +// NewQueryStats makes a QueryStats struct with all QueryTimings found in the
    +// given TimerGroup.
    +func NewQueryStats(tg *TimerGroup) *QueryStats {
    +	var qt queryTimings
    +
    +	for s, timer := range tg.timers {
    +		switch s {
    +		case EvalTotalTime:
    +			qt.EvalTotalTime = timer.Duration()
    +		case ResultSortTime:
    +			qt.ResultSortTime = timer.Duration()
    +		case QueryPreparationTime:
    +			qt.QueryPreparationTime = timer.Duration()
    +		case InnerEvalTime:
    +			qt.InnerEvalTime = timer.Duration()
    +		case ExecQueueTime:
    +			qt.ExecQueueTime = timer.Duration()
    +		case ExecTotalTime:
    +			qt.ExecTotalTime = timer.Duration()
    +		}
    +	}
    +
    +	qs := QueryStats{Timings: qt}
    +	return &qs
    +}
    diff --git a/src/prometheus/util/stats/stats_test.go b/src/prometheus/util/stats/stats_test.go
    new file mode 100644
    index 0000000..f5175a0
    --- /dev/null
    +++ b/src/prometheus/util/stats/stats_test.go
    @@ -0,0 +1,61 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package stats
    +
    +import (
    +	"encoding/json"
    +	"regexp"
    +	"testing"
    +	"time"
    +)
    +
+// TestTimerGroupNewTimer verifies that a fresh timer reports zero duration,
+// accumulates a non-zero duration across a Start/Stop cycle, and that
+// ElapsedTime is at least the time actually slept.
+func TestTimerGroupNewTimer(t *testing.T) {
+	tg := NewTimerGroup()
+	timer := tg.GetTimer(ExecTotalTime)
+	if duration := timer.Duration(); duration != 0 {
+		t.Fatalf("Expected duration of 0, but it was %f instead.", duration)
+	}
+	minimum := 2 * time.Millisecond
+	timer.Start()
+	time.Sleep(minimum)
+	timer.Stop()
+	if duration := timer.Duration(); duration == 0 {
+		t.Fatalf("Expected duration greater than 0, but it was %f instead.", duration)
+	}
+	if elapsed := timer.ElapsedTime(); elapsed < minimum {
+		t.Fatalf("Expected elapsed time to be greater than time slept, elapsed was %d, and time slept was %d.", elapsed.Nanoseconds(), minimum)
+	}
+}
    +
+// TestQueryStatsWithTimers verifies that a stopped timer's duration shows up
+// as a non-zero "execTotalTime" field in the JSON serialization of the
+// QueryStats built from its TimerGroup.
+func TestQueryStatsWithTimers(t *testing.T) {
+	tg := NewTimerGroup()
+	timer := tg.GetTimer(ExecTotalTime)
+	timer.Start()
+	time.Sleep(2 * time.Millisecond)
+	timer.Stop()
+
+	qs := NewQueryStats(tg)
+	actual, err := json.Marshal(qs)
+	if err != nil {
+		t.Fatalf("Unexpected error during serialization: %v", err)
+	}
+	// Timing value is one of multiple fields, unit is seconds (float).
+	match, err := regexp.MatchString(`[,{]"execTotalTime":\d+\.\d+[,}]`, string(actual))
+	if err != nil {
+		t.Fatalf("Unexpected error while matching string: %v", err)
+	}
+	if !match {
+		t.Fatalf("Expected timings with one non-zero entry, but got %s.", actual)
+	}
+}
    diff --git a/src/prometheus/util/stats/timer.go b/src/prometheus/util/stats/timer.go
    new file mode 100644
    index 0000000..75f5868
    --- /dev/null
    +++ b/src/prometheus/util/stats/timer.go
    @@ -0,0 +1,113 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package stats
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"sort"
    +	"time"
    +)
    +
+// A Timer that can be started and stopped and accumulates the total time it
+// was running (the time between Start() and Stop()).
+type Timer struct {
+	name     fmt.Stringer  // Identifier printed by String().
+	created  time.Time     // Creation time; used to order timers when printing a TimerGroup.
+	start    time.Time     // Start of the current (or most recent) run.
+	duration time.Duration // Accumulated running time across Start/Stop cycles.
+}
    +
+// Start the timer.  It returns the receiver so calls can be chained.
+func (t *Timer) Start() *Timer {
+	t.start = time.Now()
+	return t
+}
+
+// Stop the timer, adding the elapsed time of the current run to the
+// accumulated duration.
+func (t *Timer) Stop() {
+	t.duration += time.Since(t.start)
+}
+
+// ElapsedTime returns the time that passed since starting the timer.
+func (t *Timer) ElapsedTime() time.Duration {
+	return time.Since(t.start)
+}
+
+// Duration returns the accumulated duration value of the timer in seconds.
+func (t *Timer) Duration() float64 {
+	return t.duration.Seconds()
+}
+
+// Return a string representation of the Timer in the form "name: duration".
+func (t *Timer) String() string {
+	return fmt.Sprintf("%s: %s", t.name, t.duration)
+}
    +
+// A TimerGroup represents a group of timers relevant to a single query.
+type TimerGroup struct {
+	// timers holds one Timer per code section, keyed by its identifier.
+	timers map[fmt.Stringer]*Timer
+}
    +
    +// NewTimerGroup constructs a new TimerGroup.
    +func NewTimerGroup() *TimerGroup {
    +	return &TimerGroup{timers: map[fmt.Stringer]*Timer{}}
    +}
    +
    +// GetTimer gets (and creates, if necessary) the Timer for a given code section.
    +func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer {
    +	if timer, exists := t.timers[name]; exists {
    +		return timer
    +	}
    +	timer := &Timer{
    +		name:    name,
    +		created: time.Now(),
    +	}
    +	t.timers[name] = timer
    +	return timer
    +}
    +
+// Timers is a slice of Timer pointers that implements Len and Swap from
+// sort.Interface.
+type Timers []*Timer
+
+// byCreationTimeSorter completes sort.Interface for Timers by supplying a
+// Less that orders timers by their creation time.
+type byCreationTimeSorter struct{ Timers }
+
+// Len implements sort.Interface.
+func (t Timers) Len() int {
+	return len(t)
+}
+
+// Swap implements sort.Interface.
+func (t Timers) Swap(i, j int) {
+	t[i], t[j] = t[j], t[i]
+}
+
+// Less implements sort.Interface, ordering timers by creation time.
+func (s byCreationTimeSorter) Less(i, j int) bool {
+	return s.Timers[i].created.Before(s.Timers[j].created)
+}
    +
    +// Return a string representation of a TimerGroup.
    +func (t *TimerGroup) String() string {
    +	timers := byCreationTimeSorter{}
    +	for _, timer := range t.timers {
    +		timers.Timers = append(timers.Timers, timer)
    +	}
    +	sort.Sort(timers)
    +	result := &bytes.Buffer{}
    +	for _, timer := range timers.Timers {
    +		fmt.Fprintf(result, "%s\n", timer)
    +	}
    +	return result.String()
    +}
    diff --git a/src/prometheus/util/strutil/quote.go b/src/prometheus/util/strutil/quote.go
    new file mode 100644
    index 0000000..981ad47
    --- /dev/null
    +++ b/src/prometheus/util/strutil/quote.go
    @@ -0,0 +1,223 @@
    +// Copyright 2015 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package strutil
    +
    +import (
    +	"errors"
    +	"unicode/utf8"
    +)
    +
    +// ErrSyntax indicates that a value does not have the right syntax for the target type.
    +var ErrSyntax = errors.New("invalid syntax")
    +
    +// Unquote interprets s as a single-quoted, double-quoted, or backquoted
    +// Prometheus query language string literal, returning the string value that s
    +// quotes. It returns ErrSyntax when s is not a properly quoted literal.
    +//
    +// NOTE: This function as well as the necessary helper functions below
    +// (unquoteChar, contains, unhex) and associated tests have been adapted from
    +// the corresponding functions in the "strconv" package of the Go standard
    +// library to work for Prometheus-style strings. Go's special-casing for single
    +// quotes was removed and single quoted strings are now treated the same as
    +// double quoted ones.
    +func Unquote(s string) (t string, err error) {
    +	// The literal needs at least an opening and a closing quote, and they
    +	// must match.
    +	n := len(s)
    +	if n < 2 {
    +		return "", ErrSyntax
    +	}
    +	quote := s[0]
    +	if quote != s[n-1] {
    +		return "", ErrSyntax
    +	}
    +	s = s[1 : n-1]
    +
    +	// Backquoted strings are verbatim: no escapes, any byte except a
    +	// backquote is legal.
    +	if quote == '`' {
    +		if contains(s, '`') {
    +			return "", ErrSyntax
    +		}
    +		return s, nil
    +	}
    +	if quote != '"' && quote != '\'' {
    +		return "", ErrSyntax
    +	}
    +	// Single- and double-quoted strings may not span lines.
    +	if contains(s, '\n') {
    +		return "", ErrSyntax
    +	}
    +
    +	// Is it trivial?  Avoid allocation.
    +	if !contains(s, '\\') && !contains(s, quote) {
    +		return s, nil
    +	}
    +
    +	// Decode escape sequences one character at a time.
    +	var runeTmp [utf8.UTFMax]byte
    +	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
    +	for len(s) > 0 {
    +		c, multibyte, ss, err := unquoteChar(s, quote)
    +		if err != nil {
    +			return "", err
    +		}
    +		s = ss
    +		if c < utf8.RuneSelf || !multibyte {
    +			buf = append(buf, byte(c))
    +		} else {
    +			n := utf8.EncodeRune(runeTmp[:], c)
    +			buf = append(buf, runeTmp[:n]...)
    +		}
    +	}
    +	return string(buf), nil
    +}
    +
    +// unquoteChar decodes the first character or byte in the escaped string
    +// or character literal represented by the string s.
    +// It returns four values:
    +//
    +//	1) value, the decoded Unicode code point or byte value;
    +//	2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
    +//	3) tail, the remainder of the string after the character; and
    +//	4) an error that will be nil if the character is syntactically valid.
    +//
    +// The second argument, quote, specifies the type of literal being parsed
    +// and therefore which escaped quote character is permitted.
    +// If set to a single quote, it permits the sequence \' and disallows unescaped '.
    +// If set to a double quote, it permits \" and disallows unescaped ".
    +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
    +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
    +	// easy cases
    +	switch c := s[0]; {
    +	case c == quote && (quote == '\'' || quote == '"'):
    +		// A bare, unescaped quote character may not appear inside the
    +		// literal.
    +		err = ErrSyntax
    +		return
    +	case c >= utf8.RuneSelf:
    +		// Non-ASCII lead byte: decode one full UTF-8 rune as-is.
    +		r, size := utf8.DecodeRuneInString(s)
    +		return r, true, s[size:], nil
    +	case c != '\\':
    +		// Plain ASCII byte with no escaping involved.
    +		return rune(s[0]), false, s[1:], nil
    +	}
    +
    +	// Hard case: c is backslash.
    +	if len(s) <= 1 {
    +		// A trailing backslash with nothing left to escape.
    +		err = ErrSyntax
    +		return
    +	}
    +	c := s[1]
    +	s = s[2:]
    +
    +	switch c {
    +	case 'a':
    +		value = '\a'
    +	case 'b':
    +		value = '\b'
    +	case 'f':
    +		value = '\f'
    +	case 'n':
    +		value = '\n'
    +	case 'r':
    +		value = '\r'
    +	case 't':
    +		value = '\t'
    +	case 'v':
    +		value = '\v'
    +	case 'x', 'u', 'U':
    +		// Hex escapes: \xhh, \uhhhh, \Uhhhhhhhh — n is the number of
    +		// required hex digits.
    +		n := 0
    +		switch c {
    +		case 'x':
    +			n = 2
    +		case 'u':
    +			n = 4
    +		case 'U':
    +			n = 8
    +		}
    +		var v rune
    +		if len(s) < n {
    +			err = ErrSyntax
    +			return
    +		}
    +		for j := 0; j < n; j++ {
    +			x, ok := unhex(s[j])
    +			if !ok {
    +				err = ErrSyntax
    +				return
    +			}
    +			v = v<<4 | x
    +		}
    +		s = s[n:]
    +		if c == 'x' {
    +			// Single-byte string, possibly not UTF-8.
    +			value = v
    +			break
    +		}
    +		// \u and \U must encode a valid Unicode code point.
    +		if v > utf8.MaxRune {
    +			err = ErrSyntax
    +			return
    +		}
    +		value = v
    +		multibyte = true
    +	case '0', '1', '2', '3', '4', '5', '6', '7':
    +		// Octal escape: exactly three digits, maximum \377 (255).
    +		v := rune(c) - '0'
    +		if len(s) < 2 {
    +			err = ErrSyntax
    +			return
    +		}
    +		for j := 0; j < 2; j++ { // One digit already; two more.
    +			x := rune(s[j]) - '0'
    +			if x < 0 || x > 7 {
    +				err = ErrSyntax
    +				return
    +			}
    +			v = (v << 3) | x
    +		}
    +		s = s[2:]
    +		if v > 255 {
    +			err = ErrSyntax
    +			return
    +		}
    +		value = v
    +	case '\\':
    +		value = '\\'
    +	case '\'', '"':
    +		// Only the quote character delimiting the literal may be escaped.
    +		if c != quote {
    +			err = ErrSyntax
    +			return
    +		}
    +		value = rune(c)
    +	default:
    +		err = ErrSyntax
    +		return
    +	}
    +	tail = s
    +	return
    +}
    +
    +// contains reports whether the byte c occurs anywhere in s.
    +func contains(s string, c byte) bool {
    +	for _, b := range []byte(s) {
    +		if b == c {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// unhex converts a single ASCII hexadecimal digit to its numeric value;
    +// ok is false when b is not a hex digit.
    +func unhex(b byte) (v rune, ok bool) {
    +	c := rune(b)
    +	if c >= '0' && c <= '9' {
    +		return c - '0', true
    +	}
    +	if c >= 'a' && c <= 'f' {
    +		return c - 'a' + 10, true
    +	}
    +	if c >= 'A' && c <= 'F' {
    +		return c - 'A' + 10, true
    +	}
    +	return 0, false
    +}
    diff --git a/src/prometheus/util/strutil/quote_test.go b/src/prometheus/util/strutil/quote_test.go
    new file mode 100644
    index 0000000..0068ada
    --- /dev/null
    +++ b/src/prometheus/util/strutil/quote_test.go
    @@ -0,0 +1,125 @@
    +// Copyright 2015 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package strutil
    +
    +import (
    +	"testing"
    +)
    +
    +type quoteTest struct {
    +	in    string
    +	out   string
    +	ascii string
    +}
    +
    +var quotetests = []quoteTest{
    +	{"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`},
    +	{"\\", `"\\"`, `"\\"`},
    +	{"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`},
    +	{"\u263a", `"☺"`, `"\u263a"`},
    +	{"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`},
    +	{"\x04", `"\x04"`, `"\x04"`},
    +}
    +
    +type unQuoteTest struct {
    +	in  string
    +	out string
    +}
    +
    +var unquotetests = []unQuoteTest{
    +	{`""`, ""},
    +	{`"a"`, "a"},
    +	{`"abc"`, "abc"},
    +	{`"☺"`, "☺"},
    +	{`"hello world"`, "hello world"},
    +	{`"\xFF"`, "\xFF"},
    +	{`"\377"`, "\377"},
    +	{`"\u1234"`, "\u1234"},
    +	{`"\U00010111"`, "\U00010111"},
    +	{`"\U0001011111"`, "\U0001011111"},
    +	{`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""},
    +	{`"'"`, "'"},
    +
    +	{`''`, ""},
    +	{`'a'`, "a"},
    +	{`'abc'`, "abc"},
    +	{`'☺'`, "☺"},
    +	{`'hello world'`, "hello world"},
    +	{`'\xFF'`, "\xFF"},
    +	{`'\377'`, "\377"},
    +	{`'\u1234'`, "\u1234"},
    +	{`'\U00010111'`, "\U00010111"},
    +	{`'\U0001011111'`, "\U0001011111"},
    +	{`'\a\b\f\n\r\t\v\\\''`, "\a\b\f\n\r\t\v\\'"},
    +	{`'"'`, "\""},
    +
    +	{"``", ``},
    +	{"`a`", `a`},
    +	{"`abc`", `abc`},
    +	{"`☺`", `☺`},
    +	{"`hello world`", `hello world`},
    +	{"`\\xFF`", `\xFF`},
    +	{"`\\377`", `\377`},
    +	{"`\\`", `\`},
    +	{"`\n`", "\n"},
    +	{"`	`", `	`},
    +}
    +
    +var misquoted = []string{
    +	``,
    +	`"`,
    +	`"a`,
    +	`"'`,
    +	`b"`,
    +	`"\"`,
    +	`"\9"`,
    +	`"\19"`,
    +	`"\129"`,
    +	`'\'`,
    +	`'\9'`,
    +	`'\19'`,
    +	`'\129'`,
    +	`"\x1!"`,
    +	`"\U12345678"`,
    +	`"\z"`,
    +	"`",
    +	"`xxx",
    +	"`\"",
    +	`"\'"`,
    +	`'\"'`,
    +	"\"\n\"",
    +	"\"\\n\n\"",
    +	"'\n'",
    +}
    +
    +// TestUnquote exercises Unquote against well-formed literals, the quote
    +// fixtures run backward, and malformed inputs.
    +func TestUnquote(t *testing.T) {
    +	for _, tt := range unquotetests {
    +		// A failure is any error OR any wrong result. The original
    +		// condition used &&, which silently accepted a wrong result
    +		// whenever err was nil.
    +		if out, err := Unquote(tt.in); err != nil || out != tt.out {
    +			t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out)
    +		}
    +	}
    +
    +	// Run the quote tests too, backward.
    +	for _, tt := range quotetests {
    +		// The error must also be nil here, as the message promises.
    +		if in, err := Unquote(tt.out); err != nil || in != tt.in {
    +			t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in)
    +		}
    +	}
    +
    +	for _, s := range misquoted {
    +		if out, err := Unquote(s); out != "" || err != ErrSyntax {
    +			t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax)
    +		}
    +	}
    +}
    diff --git a/src/prometheus/util/strutil/strconv.go b/src/prometheus/util/strutil/strconv.go
    new file mode 100644
    index 0000000..3d96e4f
    --- /dev/null
    +++ b/src/prometheus/util/strutil/strconv.go
    @@ -0,0 +1,44 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package strutil
    +
    +import (
    +	"fmt"
    +	"net/url"
    +	"regexp"
    +)
    +
    +var (
    +	invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
    +)
    +
    +// TableLinkForExpression creates an escaped relative link to the table view
    +// (tab 1) of the provided expression.
    +func TableLinkForExpression(expr string) string {
    +	return fmt.Sprintf("/graph?g0.expr=%s&g0.tab=1", url.QueryEscape(expr))
    +}
    +
    +// GraphLinkForExpression creates an escaped relative link to the graph view
    +// (tab 0) of the provided expression.
    +func GraphLinkForExpression(expr string) string {
    +	return fmt.Sprintf("/graph?g0.expr=%s&g0.tab=0", url.QueryEscape(expr))
    +}
    +
    +// SanitizeLabelName replaces every character that does not match
    +// invalidLabelCharRE (i.e. anything outside [a-zA-Z0-9_]) with an
    +// underscore.
    +func SanitizeLabelName(name string) string {
    +	return invalidLabelCharRE.ReplaceAllString(name, "_")
    +}
    diff --git a/src/prometheus/util/strutil/strconv_test.go b/src/prometheus/util/strutil/strconv_test.go
    new file mode 100644
    index 0000000..acbd092
    --- /dev/null
    +++ b/src/prometheus/util/strutil/strconv_test.go
    @@ -0,0 +1,49 @@
    +// Copyright 2016 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package strutil
    +
    +import (
    +	"testing"
    +)
    +
    +type linkTest struct {
    +	expression        string
    +	expectedGraphLink string
    +	expectedTableLink string
    +}
    +
    +var linkTests = []linkTest{
    +	{
    +		"sum(incoming_http_requests_total) by (system)",
    +		"/graph?g0.expr=sum%28incoming_http_requests_total%29+by+%28system%29&g0.tab=0",
    +		"/graph?g0.expr=sum%28incoming_http_requests_total%29+by+%28system%29&g0.tab=1",
    +	},
    +	{
    +		"sum(incoming_http_requests_total{system=\"trackmetadata\"})",
    +		"/graph?g0.expr=sum%28incoming_http_requests_total%7Bsystem%3D%22trackmetadata%22%7D%29&g0.tab=0",
    +		"/graph?g0.expr=sum%28incoming_http_requests_total%7Bsystem%3D%22trackmetadata%22%7D%29&g0.tab=1",
    +	},
    +}
    +
    +// TestLink checks the graph and table links produced for each fixture
    +// expression.
    +func TestLink(t *testing.T) {
    +	for _, test := range linkTests {
    +		graphLink := GraphLinkForExpression(test.expression)
    +		if graphLink != test.expectedGraphLink {
    +			t.Errorf("GraphLinkForExpression failed for expression (%#q), want %q got %q", test.expression, test.expectedGraphLink, graphLink)
    +		}
    +
    +		tableLink := TableLinkForExpression(test.expression)
    +		if tableLink != test.expectedTableLink {
    +			t.Errorf("TableLinkForExpression failed for expression (%#q), want %q got %q", test.expression, test.expectedTableLink, tableLink)
    +		}
    +	}
    +}
    diff --git a/src/prometheus/util/testutil/directory.go b/src/prometheus/util/testutil/directory.go
    new file mode 100644
    index 0000000..d3c9c92
    --- /dev/null
    +++ b/src/prometheus/util/testutil/directory.go
    @@ -0,0 +1,129 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package testutil
    +
    +import (
    +	"io/ioutil"
    +	"os"
    +)
    +
    +const (
    +	// The base directory used for test emissions, which instructs the operating
    +	// system to use the default temporary directory as the base or TMPDIR
    +	// environment variable.
    +	defaultDirectory = ""
    +
    +	// NilCloser is a no-op Closer.
    +	NilCloser = nilCloser(true)
    +
    +	// The number of times that a TemporaryDirectory will retry its removal
    +	temporaryDirectoryRemoveRetries = 2
    +)
    +
    +type (
    +	// Closer is the interface that wraps the Close method.
    +	Closer interface {
    +		// Close reaps the underlying directory and its children. The directory
    +		// could be deleted by its users already.
    +		Close()
    +	}
    +
    +	nilCloser bool
    +
    +	// TemporaryDirectory models a closeable path for transient POSIX disk
    +	// activities.
    +	TemporaryDirectory interface {
    +		Closer
    +
    +		// Path returns the underlying path for access.
    +		Path() string
    +	}
    +
    +	// temporaryDirectory is kept as a private type due to private fields and
    +	// their interactions.
    +	temporaryDirectory struct {
    +		path   string
    +		tester T
    +	}
    +
    +	callbackCloser struct {
    +		fn func()
    +	}
    +
    +	// T implements the needed methods of testing.TB so that we do not need
    +	// to actually import testing (which has the side effect of adding all
    +	// the test flags, which we do not want in non-test binaries even if
    +	// they make use of these utilities for some reason).
    +	T interface {
    +		Fatal(args ...interface{})
    +		Fatalf(format string, args ...interface{})
    +	}
    +)
    +
    +func (c nilCloser) Close() {
    +}
    +
    +func (c callbackCloser) Close() {
    +	c.fn()
    +}
    +
    +// NewCallbackCloser returns a Closer that calls the provided function upon
    +// closing.
    +func NewCallbackCloser(fn func()) Closer {
    +	return &callbackCloser{fn: fn}
    +}
    +
    +// Close removes the temporary directory, retrying a bounded number of
    +// times on failure. A directory that is already gone counts as success;
    +// any error remaining after the retries is fatal to the tester.
    +func (t temporaryDirectory) Close() {
    +	retries := temporaryDirectoryRemoveRetries
    +	err := os.RemoveAll(t.path)
    +	for err != nil && retries > 0 {
    +		switch {
    +		case os.IsNotExist(err):
    +			// Already removed (possibly by the test itself): done.
    +			err = nil
    +		default:
    +			retries--
    +			err = os.RemoveAll(t.path)
    +		}
    +	}
    +	if err != nil {
    +		t.tester.Fatal(err)
    +	}
    +}
    +
    +func (t temporaryDirectory) Path() string {
    +	return t.path
    +}
    +
    +// NewTemporaryDirectory creates a new temporary directory for transient POSIX
    +// activities.
    +func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) {
    +	var (
    +		directory string
    +		err       error
    +	)
    +
    +	directory, err = ioutil.TempDir(defaultDirectory, name)
    +	if err != nil {
    +		t.Fatal(err)
    +	}
    +
    +	handler = temporaryDirectory{
    +		path:   directory,
    +		tester: t,
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/util/testutil/error.go b/src/prometheus/util/testutil/error.go
    new file mode 100644
    index 0000000..38fa71a
    --- /dev/null
    +++ b/src/prometheus/util/testutil/error.go
    @@ -0,0 +1,27 @@
    +// Copyright 2013 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package testutil
    +
    +// ErrorEqual compares Go errors for equality: two errors are equal when
    +// they are the same value, or when both are non-nil and their messages
    +// match.
    +func ErrorEqual(left, right error) bool {
    +	if left == right {
    +		return true
    +	}
    +	if left == nil || right == nil {
    +		return false
    +	}
    +	return left.Error() == right.Error()
    +}
    diff --git a/src/prometheus/util/testutil/roundtrip.go b/src/prometheus/util/testutil/roundtrip.go
    new file mode 100644
    index 0000000..996d11f
    --- /dev/null
    +++ b/src/prometheus/util/testutil/roundtrip.go
    @@ -0,0 +1,47 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package testutil
    +
    +import (
    +	"net/http"
    +)
    +
    +type roundTrip struct {
    +	theResponse *http.Response
    +	theError    error
    +}
    +
    +func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) {
    +	return rt.theResponse, rt.theError
    +}
    +
    +type roundTripCheckRequest struct {
    +	checkRequest func(*http.Request)
    +	roundTrip
    +}
    +
    +func (rt *roundTripCheckRequest) RoundTrip(r *http.Request) (*http.Response, error) {
    +	rt.checkRequest(r)
    +	return rt.theResponse, rt.theError
    +}
    +
    +// NewRoundTripCheckRequest creates a new instance of a type that implements http.RoundTripper,
    +// which before returning theResponse and theError, executes checkRequest against a http.Request.
    +func NewRoundTripCheckRequest(checkRequest func(*http.Request), theResponse *http.Response, theError error) http.RoundTripper {
    +	rt := roundTrip{
    +		theResponse: theResponse,
    +		theError:    theError,
    +	}
    +	return &roundTripCheckRequest{
    +		checkRequest: checkRequest,
    +		roundTrip:    rt,
    +	}
    +}
    diff --git a/src/prometheus/util/testutil/storage.go b/src/prometheus/util/testutil/storage.go
    new file mode 100644
    index 0000000..246d619
    --- /dev/null
    +++ b/src/prometheus/util/testutil/storage.go
    @@ -0,0 +1,56 @@
    +// Copyright 2017 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package testutil
    +
    +import (
    +	"io/ioutil"
    +	"os"
    +	"time"
    +
    +	"github.com/prometheus/common/model"
    +	"github.com/prometheus/prometheus/storage"
    +	"github.com/prometheus/prometheus/storage/tsdb"
    +)
    +
    +// NewStorage returns a new storage for testing purposes
    +// that removes all associated files on closing.
    +func NewStorage(t T) storage.Storage {
    +	dir, err := ioutil.TempDir("", "test_storage")
    +	if err != nil {
    +		t.Fatalf("Opening test dir failed: %s", err)
    +	}
    +
    +	// Tests just load data for a series sequentially. Thus we
    +	// need a long appendable window.
    +	db, err := tsdb.Open(dir, nil, nil, &tsdb.Options{
    +		MinBlockDuration: model.Duration(24 * time.Hour),
    +		MaxBlockDuration: model.Duration(24 * time.Hour),
    +	})
    +	if err != nil {
    +		t.Fatalf("Opening test storage failed: %s", err)
    +	}
    +	return testStorage{Storage: tsdb.Adapter(db, int64(0)), dir: dir}
    +}
    +
    +type testStorage struct {
    +	storage.Storage
    +	dir string
    +}
    +
    +func (s testStorage) Close() error {
    +	if err := s.Storage.Close(); err != nil {
    +		return err
    +	}
    +	return os.RemoveAll(s.dir)
    +}
    diff --git a/src/prometheus/util/testutil/testing.go b/src/prometheus/util/testutil/testing.go
    new file mode 100644
    index 0000000..7b224c6
    --- /dev/null
    +++ b/src/prometheus/util/testutil/testing.go
    @@ -0,0 +1,63 @@
    +// The MIT License (MIT)
    +
    +// Copyright (c) 2014 Ben Johnson
    +
    +// Permission is hereby granted, free of charge, to any person obtaining a copy
    +// of this software and associated documentation files (the "Software"), to deal
    +// in the Software without restriction, including without limitation the rights
    +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +// copies of the Software, and to permit persons to whom the Software is
    +// furnished to do so, subject to the following conditions:
    +
    +// The above copyright notice and this permission notice shall be included in all
    +// copies or substantial portions of the Software.
    +
    +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +// SOFTWARE.
    +
    +package testutil
    +
    +import (
    +	"reflect"
    +	"testing"
    +)
    +
    +// Assert fails the test if the condition is false.
    +func Assert(tb testing.TB, condition bool, format string, a ...interface{}) {
    +	tb.Helper()
    +	if condition {
    +		return
    +	}
    +	tb.Fatalf("\033[31m"+format+"\033[39m\n", a...)
    +}
    +
    +// Ok fails the test if err is not nil.
    +func Ok(tb testing.TB, err error) {
    +	tb.Helper()
    +	if err == nil {
    +		return
    +	}
    +	tb.Fatalf("\033[31munexpected error: %v\033[39m\n", err)
    +}
    +
    +// NotOk fails the test if err is nil; the format/args, when given, prefix
    +// the failure message.
    +func NotOk(tb testing.TB, err error, format string, a ...interface{}) {
    +	tb.Helper()
    +	if err != nil {
    +		return
    +	}
    +	if len(a) != 0 {
    +		tb.Fatalf("\033[31m"+format+": expected error, got none\033[39m", a...)
    +	}
    +	tb.Fatalf("\033[31mexpected error, got none\033[39m")
    +}
    +
    +// Equals fails the test if exp is not deeply equal to act.
    +func Equals(tb testing.TB, exp, act interface{}) {
    +	tb.Helper()
    +	if reflect.DeepEqual(exp, act) {
    +		return
    +	}
    +	tb.Fatalf("\033[31m\nexp: %#v\n\ngot: %#v\033[39m\n", exp, act)
    +}
    diff --git a/src/prometheus/util/treecache/treecache.go b/src/prometheus/util/treecache/treecache.go
    new file mode 100644
    index 0000000..11ecae7
    --- /dev/null
    +++ b/src/prometheus/util/treecache/treecache.go
    @@ -0,0 +1,293 @@
    +// Copyright 2016 The Prometheus Authors
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package treecache
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"strings"
    +	"time"
    +
    +	"github.com/go-kit/kit/log"
    +	"github.com/go-kit/kit/log/level"
    +	"github.com/prometheus/client_golang/prometheus"
    +	"github.com/samuel/go-zookeeper/zk"
    +)
    +
    +var (
    +	failureCounter = prometheus.NewCounter(prometheus.CounterOpts{
    +		Namespace: "prometheus",
    +		Subsystem: "treecache",
    +		Name:      "zookeeper_failures_total",
    +		Help:      "The total number of ZooKeeper failures.",
    +	})
    +	numWatchers = prometheus.NewGauge(prometheus.GaugeOpts{
    +		Namespace: "prometheus",
    +		Subsystem: "treecache",
    +		Name:      "watcher_goroutines",
    +		Help:      "The current number of watcher goroutines.",
    +	})
    +)
    +
    +func init() {
    +	prometheus.MustRegister(failureCounter)
    +	prometheus.MustRegister(numWatchers)
    +}
    +
    +// ZookeeperLogger wraps a log.Logger into a zk.Logger.
    +type ZookeeperLogger struct {
    +	logger log.Logger
    +}
    +
    +// NewZookeeperLogger is a constructor for ZookeeperLogger.
    +func NewZookeeperLogger(logger log.Logger) ZookeeperLogger {
    +	return ZookeeperLogger{logger: logger}
    +}
    +
    +// Printf implements zk.Logger.
    +func (zl ZookeeperLogger) Printf(s string, i ...interface{}) {
    +	level.Info(zl.logger).Log("msg", fmt.Sprintf(s, i...))
    +}
    +
    +// A ZookeeperTreeCache keeps data from all children of a Zookeeper path
    +// locally cached and updated according to received events.
    +type ZookeeperTreeCache struct {
    +	conn     *zk.Conn
    +	prefix   string
    +	events   chan ZookeeperTreeCacheEvent
    +	zkEvents chan zk.Event
    +	stop     chan struct{}
    +	head     *zookeeperTreeCacheNode
    +
    +	logger log.Logger
    +}
    +
    +// A ZookeeperTreeCacheEvent models a Zookeeper event for a path.
    +type ZookeeperTreeCacheEvent struct {
    +	Path string
    +	Data *[]byte
    +}
    +
    +type zookeeperTreeCacheNode struct {
    +	data     *[]byte
    +	events   chan zk.Event
    +	done     chan struct{}
    +	stopped  bool
    +	children map[string]*zookeeperTreeCacheNode
    +}
    +
    +// NewZookeeperTreeCache creates a new ZookeeperTreeCache for a given path.
    +func NewZookeeperTreeCache(conn *zk.Conn, path string, events chan ZookeeperTreeCacheEvent, logger log.Logger) *ZookeeperTreeCache {
    +	tc := &ZookeeperTreeCache{
    +		conn:   conn,
    +		prefix: path,
    +		events: events,
    +		stop:   make(chan struct{}),
    +
    +		logger: logger,
    +	}
    +	tc.head = &zookeeperTreeCacheNode{
    +		events:   make(chan zk.Event),
    +		children: map[string]*zookeeperTreeCacheNode{},
    +		stopped:  true,
    +	}
    +	go tc.loop(path)
    +	return tc
    +}
    +
    +// Stop stops the tree cache.
    +func (tc *ZookeeperTreeCache) Stop() {
    +	tc.stop <- struct{}{}
    +}
    +
    +// loop owns the cache state: it performs the initial recursive read of the
    +// Zookeeper tree and then serially processes watch events, resync retries
    +// and stop requests until Stop is called.
    +func (tc *ZookeeperTreeCache) loop(path string) {
    +	failureMode := false
    +	retryChan := make(chan struct{})
    +
    +	// failure flips the cache into failure mode and schedules a resync
    +	// attempt ten seconds later via retryChan.
    +	failure := func() {
    +		failureCounter.Inc()
    +		failureMode = true
    +		time.AfterFunc(time.Second*10, func() {
    +			retryChan <- struct{}{}
    +		})
    +	}
    +
    +	err := tc.recursiveNodeUpdate(path, tc.head)
    +	if err != nil {
    +		level.Error(tc.logger).Log("msg", "Error during initial read of Zookeeper", "err", err)
    +		failure()
    +	}
    +
    +	for {
    +		select {
    +		case ev := <-tc.head.events:
    +			level.Debug(tc.logger).Log("msg", "Received Zookeeper event", "event", ev)
    +			// In failure mode events are dropped; the retry path below
    +			// re-reads the whole tree instead.
    +			if failureMode {
    +				continue
    +			}
    +
    +			if ev.Type == zk.EventNotWatching {
    +				level.Info(tc.logger).Log("msg", "Lost connection to Zookeeper.")
    +				failure()
    +			} else {
    +				// Walk (and create as needed) the cached node for the
    +				// event's path, then re-read that subtree.
    +				path := strings.TrimPrefix(ev.Path, tc.prefix)
    +				parts := strings.Split(path, "/")
    +				node := tc.head
    +				for _, part := range parts[1:] {
    +					childNode := node.children[part]
    +					if childNode == nil {
    +						childNode = &zookeeperTreeCacheNode{
    +							events:   tc.head.events,
    +							children: map[string]*zookeeperTreeCacheNode{},
    +							done:     make(chan struct{}, 1),
    +						}
    +						node.children[part] = childNode
    +					}
    +					node = childNode
    +				}
    +
    +				err := tc.recursiveNodeUpdate(ev.Path, node)
    +				if err != nil {
    +					level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err)
    +					failure()
    +				} else if tc.head.data == nil {
    +					level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix)
    +					failure()
    +				}
    +			}
    +		case <-retryChan:
    +			level.Info(tc.logger).Log("msg", "Attempting to resync state with Zookeeper")
    +			previousState := &zookeeperTreeCacheNode{
    +				children: tc.head.children,
    +			}
    +			// Reset root child nodes before traversing the Zookeeper path.
    +			tc.head.children = make(map[string]*zookeeperTreeCacheNode)
    +
    +			if err := tc.recursiveNodeUpdate(tc.prefix, tc.head); err != nil {
    +				level.Error(tc.logger).Log("msg", "Error during Zookeeper resync", "err", err)
    +				// Revert to our previous state.
    +				tc.head.children = previousState.children
    +				failure()
    +			} else {
    +				tc.resyncState(tc.prefix, tc.head, previousState)
    +				// Fix: go-kit's Log takes alternating key/value pairs; the
    +				// original call passed a lone value, yielding a malformed
    +				// record. Use the "msg" key like every other call here.
    +				level.Info(tc.logger).Log("msg", "Zookeeper resync successful")
    +				failureMode = false
    +			}
    +		case <-tc.stop:
    +			tc.recursiveStop(tc.head)
    +			return
    +		}
    +	}
    +}
    +
+// recursiveNodeUpdate reads and watches the node at path, emits a cache event
+// when its data changed, and recursively mirrors its children into the cache.
+// It returns a non-nil error only when Zookeeper itself fails or when the
+// cache root itself no longer exists.
+func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error {
+	data, _, dataWatcher, err := tc.conn.GetW(path)
+	if err == zk.ErrNoNode {
+		// The node vanished between events: purge it and its subtree from
+		// the cache. Losing the root node is treated as an error.
+		tc.recursiveDelete(path, node)
+		if node == tc.head {
+			return fmt.Errorf("path %s does not exist", path)
+		}
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// Emit an event only when the payload actually changed.
+	if node.data == nil || !bytes.Equal(*node.data, data) {
+		node.data = &data
+		tc.events <- ZookeeperTreeCacheEvent{Path: path, Data: node.data}
+	}
+
+	children, _, childWatcher, err := tc.conn.ChildrenW(path)
+	if err == zk.ErrNoNode {
+		tc.recursiveDelete(path, node)
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	currentChildren := map[string]struct{}{}
+	for _, child := range children {
+		currentChildren[child] = struct{}{}
+		childNode := node.children[child]
+		// The child does not exist yet, or a previously set watch already
+		// triggered (stopped), so it must be (re)created and re-watched.
+		if childNode == nil || childNode.stopped {
+			node.children[child] = &zookeeperTreeCacheNode{
+				events:   node.events,
+				children: map[string]*zookeeperTreeCacheNode{},
+				done:     make(chan struct{}, 1),
+			}
+			err = tc.recursiveNodeUpdate(path+"/"+child, node.children[child])
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Remove nodes that no longer exist (deleting while ranging is safe in Go).
+	for name, childNode := range node.children {
+		if _, ok := currentChildren[name]; !ok || node.data == nil {
+			tc.recursiveDelete(path+"/"+name, childNode)
+			delete(node.children, name)
+		}
+	}
+
+	go func() {
+		numWatchers.Inc()
+		// Pass up zookeeper events, until the node is deleted; node.done
+		// unblocks this goroutine when the node is removed or stopped.
+		select {
+		case event := <-dataWatcher:
+			node.events <- event
+		case event := <-childWatcher:
+			node.events <- event
+		case <-node.done:
+		}
+		numWatchers.Dec()
+	}()
+	return nil
+}
    +
+// resyncState compares the freshly rebuilt tree against the previous cache
+// state: children that survived are recursed into, while children that are
+// gone from the current state are recursively deleted (emitting events).
+func (tc *ZookeeperTreeCache) resyncState(path string, currentState, previousState *zookeeperTreeCacheNode) {
+	for child, previousNode := range previousState.children {
+		if currentNode, present := currentState.children[child]; present {
+			tc.resyncState(path+"/"+child, currentNode, previousNode)
+		} else {
+			tc.recursiveDelete(path+"/"+child, previousNode)
+		}
+	}
+}
    +
+// recursiveDelete stops the watcher goroutine of node and every descendant,
+// and emits a deletion event (nil Data) for each node that still held data.
+func (tc *ZookeeperTreeCache) recursiveDelete(path string, node *zookeeperTreeCacheNode) {
+	if !node.stopped {
+		// done is buffered with capacity 1, so this send never blocks.
+		node.done <- struct{}{}
+		node.stopped = true
+	}
+	if node.data != nil {
+		tc.events <- ZookeeperTreeCacheEvent{Path: path, Data: nil}
+		node.data = nil
+	}
+	for name, childNode := range node.children {
+		tc.recursiveDelete(path+"/"+name, childNode)
+	}
+}
    +
+// recursiveStop terminates the watcher goroutines of node and its whole
+// subtree without emitting deletion events; used when the cache shuts down.
+func (tc *ZookeeperTreeCache) recursiveStop(node *zookeeperTreeCacheNode) {
+	if !node.stopped {
+		node.done <- struct{}{}
+		node.stopped = true
+	}
+	for _, childNode := range node.children {
+		tc.recursiveStop(childNode)
+	}
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
    new file mode 100644
    index 0000000..af39a91
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
    @@ -0,0 +1,202 @@
    +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright 2016 Microsoft Corporation
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go
    new file mode 100644
    index 0000000..f1789c9
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go
    @@ -0,0 +1,366 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// AvailabilitySetsClient is the Compute Management Client for availability
+// set operations; it embeds ManagementClient for shared configuration
+// (base URI, subscription ID, API version).
+type AvailabilitySetsClient struct {
+	ManagementClient
+}
    +
+// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient
+// client using the package-level DefaultBaseURI endpoint.
+func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient {
+	return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
    +
+// NewAvailabilitySetsClientWithBaseURI creates an instance of the
+// AvailabilitySetsClient client against the given base URI instead of the
+// default endpoint.
+func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient {
+	return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// CreateOrUpdate the operation to create or update the availability set.
+// It validates the parameters, then runs the standard prepare/send/respond
+// pipeline; errors from each stage are wrapped with the failing stage.
+//
+// resourceGroupName is the name of the resource group. name is parameters
+// supplied to the Create Availability Set operation. parameters is
+// parameters supplied to the Create Availability Set operation.
+func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, err error) {
+	// Statuses is a read-only property and must not be set by the caller.
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.Properties.Statuses", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate")
+	}
+
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request: a PUT with a
+// JSON body, URL-encoded path parameters, and the client's api-version as a
+// query parameter.
+func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters AvailabilitySet) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"name":              autorest.Encode("path", name),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{name}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
    +
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error. The client itself is passed as
+// the sender, so its configured send behavior applies.
+func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
    +
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body. Only HTTP 200 is accepted as success; the
+// body is unmarshalled from JSON into the result.
+func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response) (result AvailabilitySet, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// Delete the operation to delete the availability set, using the standard
+// prepare/send/respond pipeline; errors are wrapped with the failing stage.
+//
+// resourceGroupName is the name of the resource group. availabilitySetName is
+// the name of the availability set.
+func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, availabilitySetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// DeletePreparer prepares the Delete request: a DELETE with URL-encoded path
+// parameters and the client's api-version as a query parameter (no body).
+func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"availabilitySetName": autorest.Encode("path", availabilitySetName),
+		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
    +
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error. The client itself is passed as
+// the sender, so its configured send behavior applies.
+func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
    +
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body. HTTP 200 and 204 are accepted as success;
+// no body is decoded, the raw response is returned.
+func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get the operation to get the availability set, using the standard
+// prepare/send/respond pipeline; errors are wrapped with the failing stage.
+//
+// resourceGroupName is the name of the resource group. availabilitySetName is
+// the name of the availability set.
+func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) {
+	req, err := client.GetPreparer(resourceGroupName, availabilitySetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// GetPreparer prepares the Get request: a GET with URL-encoded path
+// parameters and the client's api-version as a query parameter.
+func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"availabilitySetName": autorest.Encode("path", availabilitySetName),
+		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
    +
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error. The client itself is passed as
+// the sender, so its configured send behavior applies.
+func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
    +
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body. Only HTTP 200 is accepted as success; the
+// body is unmarshalled from JSON into the result.
+func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result AvailabilitySet, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List the operation to list the availability sets in a resource group, using
+// the standard prepare/send/respond pipeline; errors are wrapped with the
+// failing stage.
+//
+// resourceGroupName is the name of the resource group.
+func (client AvailabilitySetsClient) List(resourceGroupName string) (result AvailabilitySetListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// ListPreparer prepares the List request: a GET on the availabilitySets
+// collection with URL-encoded path parameters and the client's api-version
+// as a query parameter.
+func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
    +
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error. The client itself is passed as
+// the sender, so its configured send behavior applies.
+func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
    +
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body. Only HTTP 200 is accepted as success; the
+// body is unmarshalled from JSON into the result.
+func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result AvailabilitySetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListAvailableSizes lists all available virtual machine sizes that can be
+// used to create a new virtual machine in an existing availability set, using
+// the standard prepare/send/respond pipeline; errors are wrapped with the
+// failing stage.
+//
+// resourceGroupName is the name of the resource group. availabilitySetName is
+// the name of the availability set.
+func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) {
+	req, err := client.ListAvailableSizesPreparer(resourceGroupName, availabilitySetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAvailableSizesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAvailableSizesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// ListAvailableSizesPreparer prepares the ListAvailableSizes request: a GET
+// on the availability set's vmSizes sub-resource with URL-encoded path
+// parameters and the client's api-version as a query parameter.
+func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"availabilitySetName": autorest.Encode("path", availabilitySetName),
+		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
    +
    +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always
    +// closes the http.Response Body.
    +func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go
    new file mode 100644
    index 0000000..e8f3fb3
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go
    @@ -0,0 +1,58 @@
    +// Package compute implements the Azure ARM Compute service API version
    +// 2016-03-30.
    +//
    +// The Compute Management Client.
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +)
    +
    +const (
    +	// APIVersion is the version of the Compute
    +	APIVersion = "2016-03-30"
    +
    +	// DefaultBaseURI is the default URI used for the service Compute
    +	DefaultBaseURI = "https://management.azure.com"
    +)
    +
    +// ManagementClient is the base client for Compute.
    +type ManagementClient struct {
    +	autorest.Client
    +	BaseURI        string
    +	APIVersion     string
    +	SubscriptionID string
    +}
    +
    +// New creates an instance of the ManagementClient client.
    +func New(subscriptionID string) ManagementClient {
    +	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewWithBaseURI creates an instance of the ManagementClient client.
    +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
    +	return ManagementClient{
    +		Client:         autorest.NewClientWithUserAgent(UserAgent()),
    +		BaseURI:        baseURI,
    +		APIVersion:     APIVersion,
    +		SubscriptionID: subscriptionID,
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go
    new file mode 100644
    index 0000000..80570cb
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go
    @@ -0,0 +1,1180 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/date"
    +	"github.com/Azure/go-autorest/autorest/to"
    +	"net/http"
    +)
    +
    +// CachingTypes enumerates the values for caching types.
    +type CachingTypes string
    +
    +const (
    +	// None specifies the none state for caching types.
    +	None CachingTypes = "None"
    +	// ReadOnly specifies the read only state for caching types.
    +	ReadOnly CachingTypes = "ReadOnly"
    +	// ReadWrite specifies the read write state for caching types.
    +	ReadWrite CachingTypes = "ReadWrite"
    +)
    +
    +// ComponentNames enumerates the values for component names.
    +type ComponentNames string
    +
    +const (
    +	// MicrosoftWindowsShellSetup specifies the microsoft windows shell setup
    +	// state for component names.
    +	MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup"
    +)
    +
    +// DiskCreateOptionTypes enumerates the values for disk create option types.
    +type DiskCreateOptionTypes string
    +
    +const (
    +	// Attach specifies the attach state for disk create option types.
    +	Attach DiskCreateOptionTypes = "attach"
    +	// Empty specifies the empty state for disk create option types.
    +	Empty DiskCreateOptionTypes = "empty"
    +	// FromImage specifies the from image state for disk create option types.
    +	FromImage DiskCreateOptionTypes = "fromImage"
    +)
    +
    +// InstanceViewTypes enumerates the values for instance view types.
    +type InstanceViewTypes string
    +
    +const (
    +	// InstanceView specifies the instance view state for instance view types.
    +	InstanceView InstanceViewTypes = "instanceView"
    +)
    +
    +// OperatingSystemTypes enumerates the values for operating system types.
    +type OperatingSystemTypes string
    +
    +const (
    +	// Linux specifies the linux state for operating system types.
    +	Linux OperatingSystemTypes = "Linux"
    +	// Windows specifies the windows state for operating system types.
    +	Windows OperatingSystemTypes = "Windows"
    +)
    +
    +// PassNames enumerates the values for pass names.
    +type PassNames string
    +
    +const (
    +	// OobeSystem specifies the oobe system state for pass names.
    +	OobeSystem PassNames = "oobeSystem"
    +)
    +
    +// ProtocolTypes enumerates the values for protocol types.
    +type ProtocolTypes string
    +
    +const (
    +	// HTTP specifies the http state for protocol types.
    +	HTTP ProtocolTypes = "Http"
    +	// HTTPS specifies the https state for protocol types.
    +	HTTPS ProtocolTypes = "Https"
    +)
    +
    +// SettingNames enumerates the values for setting names.
    +type SettingNames string
    +
    +const (
    +	// AutoLogon specifies the auto logon state for setting names.
    +	AutoLogon SettingNames = "AutoLogon"
    +	// FirstLogonCommands specifies the first logon commands state for setting
    +	// names.
    +	FirstLogonCommands SettingNames = "FirstLogonCommands"
    +)
    +
    +// StatusLevelTypes enumerates the values for status level types.
    +type StatusLevelTypes string
    +
    +const (
    +	// Error specifies the error state for status level types.
    +	Error StatusLevelTypes = "Error"
    +	// Info specifies the info state for status level types.
    +	Info StatusLevelTypes = "Info"
    +	// Warning specifies the warning state for status level types.
    +	Warning StatusLevelTypes = "Warning"
    +)
    +
    +// UpgradeMode enumerates the values for upgrade mode.
    +type UpgradeMode string
    +
    +const (
    +	// Automatic specifies the automatic state for upgrade mode.
    +	Automatic UpgradeMode = "Automatic"
    +	// Manual specifies the manual state for upgrade mode.
    +	Manual UpgradeMode = "Manual"
    +)
    +
    +// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual
    +// machine scale set sku scale type.
    +type VirtualMachineScaleSetSkuScaleType string
    +
    +const (
    +	// VirtualMachineScaleSetSkuScaleTypeAutomatic specifies the virtual
    +	// machine scale set sku scale type automatic state for virtual machine
    +	// scale set sku scale type.
    +	VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic"
    +	// VirtualMachineScaleSetSkuScaleTypeNone specifies the virtual machine
    +	// scale set sku scale type none state for virtual machine scale set sku
    +	// scale type.
    +	VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None"
    +)
    +
    +// VirtualMachineSizeTypes enumerates the values for virtual machine size
    +// types.
    +type VirtualMachineSizeTypes string
    +
    +const (
    +	// BasicA0 specifies the basic a0 state for virtual machine size types.
    +	BasicA0 VirtualMachineSizeTypes = "Basic_A0"
    +	// BasicA1 specifies the basic a1 state for virtual machine size types.
    +	BasicA1 VirtualMachineSizeTypes = "Basic_A1"
    +	// BasicA2 specifies the basic a2 state for virtual machine size types.
    +	BasicA2 VirtualMachineSizeTypes = "Basic_A2"
    +	// BasicA3 specifies the basic a3 state for virtual machine size types.
    +	BasicA3 VirtualMachineSizeTypes = "Basic_A3"
    +	// BasicA4 specifies the basic a4 state for virtual machine size types.
    +	BasicA4 VirtualMachineSizeTypes = "Basic_A4"
    +	// StandardA0 specifies the standard a0 state for virtual machine size
    +	// types.
    +	StandardA0 VirtualMachineSizeTypes = "Standard_A0"
    +	// StandardA1 specifies the standard a1 state for virtual machine size
    +	// types.
    +	StandardA1 VirtualMachineSizeTypes = "Standard_A1"
    +	// StandardA10 specifies the standard a10 state for virtual machine size
    +	// types.
    +	StandardA10 VirtualMachineSizeTypes = "Standard_A10"
    +	// StandardA11 specifies the standard a11 state for virtual machine size
    +	// types.
    +	StandardA11 VirtualMachineSizeTypes = "Standard_A11"
    +	// StandardA2 specifies the standard a2 state for virtual machine size
    +	// types.
    +	StandardA2 VirtualMachineSizeTypes = "Standard_A2"
    +	// StandardA3 specifies the standard a3 state for virtual machine size
    +	// types.
    +	StandardA3 VirtualMachineSizeTypes = "Standard_A3"
    +	// StandardA4 specifies the standard a4 state for virtual machine size
    +	// types.
    +	StandardA4 VirtualMachineSizeTypes = "Standard_A4"
    +	// StandardA5 specifies the standard a5 state for virtual machine size
    +	// types.
    +	StandardA5 VirtualMachineSizeTypes = "Standard_A5"
    +	// StandardA6 specifies the standard a6 state for virtual machine size
    +	// types.
    +	StandardA6 VirtualMachineSizeTypes = "Standard_A6"
    +	// StandardA7 specifies the standard a7 state for virtual machine size
    +	// types.
    +	StandardA7 VirtualMachineSizeTypes = "Standard_A7"
    +	// StandardA8 specifies the standard a8 state for virtual machine size
    +	// types.
    +	StandardA8 VirtualMachineSizeTypes = "Standard_A8"
    +	// StandardA9 specifies the standard a9 state for virtual machine size
    +	// types.
    +	StandardA9 VirtualMachineSizeTypes = "Standard_A9"
    +	// StandardD1 specifies the standard d1 state for virtual machine size
    +	// types.
    +	StandardD1 VirtualMachineSizeTypes = "Standard_D1"
    +	// StandardD11 specifies the standard d11 state for virtual machine size
    +	// types.
    +	StandardD11 VirtualMachineSizeTypes = "Standard_D11"
    +	// StandardD11V2 specifies the standard d11v2 state for virtual machine
    +	// size types.
    +	StandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2"
    +	// StandardD12 specifies the standard d12 state for virtual machine size
    +	// types.
    +	StandardD12 VirtualMachineSizeTypes = "Standard_D12"
    +	// StandardD12V2 specifies the standard d12v2 state for virtual machine
    +	// size types.
    +	StandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2"
    +	// StandardD13 specifies the standard d13 state for virtual machine size
    +	// types.
    +	StandardD13 VirtualMachineSizeTypes = "Standard_D13"
    +	// StandardD13V2 specifies the standard d13v2 state for virtual machine
    +	// size types.
    +	StandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2"
    +	// StandardD14 specifies the standard d14 state for virtual machine size
    +	// types.
    +	StandardD14 VirtualMachineSizeTypes = "Standard_D14"
    +	// StandardD14V2 specifies the standard d14v2 state for virtual machine
    +	// size types.
    +	StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2"
    +	// StandardD15V2 specifies the standard d15v2 state for virtual machine
    +	// size types.
    +	StandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2"
    +	// StandardD1V2 specifies the standard d1v2 state for virtual machine size
    +	// types.
    +	StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2"
    +	// StandardD2 specifies the standard d2 state for virtual machine size
    +	// types.
    +	StandardD2 VirtualMachineSizeTypes = "Standard_D2"
    +	// StandardD2V2 specifies the standard d2v2 state for virtual machine size
    +	// types.
    +	StandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2"
    +	// StandardD3 specifies the standard d3 state for virtual machine size
    +	// types.
    +	StandardD3 VirtualMachineSizeTypes = "Standard_D3"
    +	// StandardD3V2 specifies the standard d3v2 state for virtual machine size
    +	// types.
    +	StandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2"
    +	// StandardD4 specifies the standard d4 state for virtual machine size
    +	// types.
    +	StandardD4 VirtualMachineSizeTypes = "Standard_D4"
    +	// StandardD4V2 specifies the standard d4v2 state for virtual machine size
    +	// types.
    +	StandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2"
    +	// StandardD5V2 specifies the standard d5v2 state for virtual machine size
    +	// types.
    +	StandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2"
    +	// StandardDS1 specifies the standard ds1 state for virtual machine size
    +	// types.
    +	StandardDS1 VirtualMachineSizeTypes = "Standard_DS1"
    +	// StandardDS11 specifies the standard ds11 state for virtual machine size
    +	// types.
    +	StandardDS11 VirtualMachineSizeTypes = "Standard_DS11"
    +	// StandardDS11V2 specifies the standard ds11v2 state for virtual machine
    +	// size types.
    +	StandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2"
    +	// StandardDS12 specifies the standard ds12 state for virtual machine size
    +	// types.
    +	StandardDS12 VirtualMachineSizeTypes = "Standard_DS12"
    +	// StandardDS12V2 specifies the standard ds12v2 state for virtual machine
    +	// size types.
    +	StandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2"
    +	// StandardDS13 specifies the standard ds13 state for virtual machine size
    +	// types.
    +	StandardDS13 VirtualMachineSizeTypes = "Standard_DS13"
    +	// StandardDS13V2 specifies the standard ds13v2 state for virtual machine
    +	// size types.
    +	StandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2"
    +	// StandardDS14 specifies the standard ds14 state for virtual machine size
    +	// types.
    +	StandardDS14 VirtualMachineSizeTypes = "Standard_DS14"
    +	// StandardDS14V2 specifies the standard ds14v2 state for virtual machine
    +	// size types.
    +	StandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2"
    +	// StandardDS15V2 specifies the standard ds15v2 state for virtual machine
    +	// size types.
    +	StandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2"
    +	// StandardDS1V2 specifies the standard ds1v2 state for virtual machine
    +	// size types.
    +	StandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2"
    +	// StandardDS2 specifies the standard ds2 state for virtual machine size
    +	// types.
    +	StandardDS2 VirtualMachineSizeTypes = "Standard_DS2"
    +	// StandardDS2V2 specifies the standard ds2v2 state for virtual machine
    +	// size types.
    +	StandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2"
    +	// StandardDS3 specifies the standard ds3 state for virtual machine size
    +	// types.
    +	StandardDS3 VirtualMachineSizeTypes = "Standard_DS3"
    +	// StandardDS3V2 specifies the standard ds3v2 state for virtual machine
    +	// size types.
    +	StandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2"
    +	// StandardDS4 specifies the standard ds4 state for virtual machine size
    +	// types.
    +	StandardDS4 VirtualMachineSizeTypes = "Standard_DS4"
    +	// StandardDS4V2 specifies the standard ds4v2 state for virtual machine
    +	// size types.
    +	StandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2"
    +	// StandardDS5V2 specifies the standard ds5v2 state for virtual machine
    +	// size types.
    +	StandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2"
    +	// StandardG1 specifies the standard g1 state for virtual machine size
    +	// types.
    +	StandardG1 VirtualMachineSizeTypes = "Standard_G1"
    +	// StandardG2 specifies the standard g2 state for virtual machine size
    +	// types.
    +	StandardG2 VirtualMachineSizeTypes = "Standard_G2"
    +	// StandardG3 specifies the standard g3 state for virtual machine size
    +	// types.
    +	StandardG3 VirtualMachineSizeTypes = "Standard_G3"
    +	// StandardG4 specifies the standard g4 state for virtual machine size
    +	// types.
    +	StandardG4 VirtualMachineSizeTypes = "Standard_G4"
    +	// StandardG5 specifies the standard g5 state for virtual machine size
    +	// types.
    +	StandardG5 VirtualMachineSizeTypes = "Standard_G5"
    +	// StandardGS1 specifies the standard gs1 state for virtual machine size
    +	// types.
    +	StandardGS1 VirtualMachineSizeTypes = "Standard_GS1"
    +	// StandardGS2 specifies the standard gs2 state for virtual machine size
    +	// types.
    +	StandardGS2 VirtualMachineSizeTypes = "Standard_GS2"
    +	// StandardGS3 specifies the standard gs3 state for virtual machine size
    +	// types.
    +	StandardGS3 VirtualMachineSizeTypes = "Standard_GS3"
    +	// StandardGS4 specifies the standard gs4 state for virtual machine size
    +	// types.
    +	StandardGS4 VirtualMachineSizeTypes = "Standard_GS4"
    +	// StandardGS5 specifies the standard gs5 state for virtual machine size
    +	// types.
    +	StandardGS5 VirtualMachineSizeTypes = "Standard_GS5"
    +)
    +
    +// AdditionalUnattendContent is additional XML formatted information that can
    +// be included in the Unattend.xml file, which is used by Windows Setup.
    +// Contents are defined by setting name, component name, and the pass in
+// which the content is applied.
    +type AdditionalUnattendContent struct {
    +	PassName      PassNames      `json:"passName,omitempty"`
    +	ComponentName ComponentNames `json:"componentName,omitempty"`
    +	SettingName   SettingNames   `json:"settingName,omitempty"`
    +	Content       *string        `json:"content,omitempty"`
    +}
    +
    +// APIEntityReference is the API entity reference.
    +type APIEntityReference struct {
    +	ID *string `json:"id,omitempty"`
    +}
    +
    +// APIError is api error.
    +type APIError struct {
    +	Details    *[]APIErrorBase `json:"details,omitempty"`
    +	Innererror *InnerError     `json:"innererror,omitempty"`
    +	Code       *string         `json:"code,omitempty"`
    +	Target     *string         `json:"target,omitempty"`
    +	Message    *string         `json:"message,omitempty"`
    +}
    +
    +// APIErrorBase is api error base.
    +type APIErrorBase struct {
    +	Code    *string `json:"code,omitempty"`
    +	Target  *string `json:"target,omitempty"`
    +	Message *string `json:"message,omitempty"`
    +}
    +
    +// AvailabilitySet is create or update Availability Set parameters.
    +type AvailabilitySet struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                    `json:"id,omitempty"`
    +	Name              *string                    `json:"name,omitempty"`
    +	Type              *string                    `json:"type,omitempty"`
    +	Location          *string                    `json:"location,omitempty"`
    +	Tags              *map[string]*string        `json:"tags,omitempty"`
    +	Properties        *AvailabilitySetProperties `json:"properties,omitempty"`
    +}
    +
    +// AvailabilitySetListResult is the List Availability Set operation response.
    +type AvailabilitySetListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]AvailabilitySet `json:"value,omitempty"`
    +}
    +
    +// AvailabilitySetProperties is the instance view of a resource.
    +type AvailabilitySetProperties struct {
    +	PlatformUpdateDomainCount *int32                `json:"platformUpdateDomainCount,omitempty"`
    +	PlatformFaultDomainCount  *int32                `json:"platformFaultDomainCount,omitempty"`
    +	VirtualMachines           *[]SubResource        `json:"virtualMachines,omitempty"`
    +	Statuses                  *[]InstanceViewStatus `json:"statuses,omitempty"`
    +}
    +
    +// BootDiagnostics is describes Boot Diagnostics.
    +type BootDiagnostics struct {
    +	Enabled    *bool   `json:"enabled,omitempty"`
    +	StorageURI *string `json:"storageUri,omitempty"`
    +}
    +
    +// BootDiagnosticsInstanceView is the instance view of a virtual machine boot
    +// diagnostics.
    +type BootDiagnosticsInstanceView struct {
    +	ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"`
    +	SerialConsoleLogBlobURI  *string `json:"serialConsoleLogBlobUri,omitempty"`
    +}
    +
    +// DataDisk is describes a data disk.
    +type DataDisk struct {
    +	Lun          *int32                `json:"lun,omitempty"`
    +	Name         *string               `json:"name,omitempty"`
    +	Vhd          *VirtualHardDisk      `json:"vhd,omitempty"`
    +	Image        *VirtualHardDisk      `json:"image,omitempty"`
    +	Caching      CachingTypes          `json:"caching,omitempty"`
    +	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
    +	DiskSizeGB   *int32                `json:"diskSizeGB,omitempty"`
    +}
    +
    +// DataDiskImage is contains the data disk images information.
    +type DataDiskImage struct {
    +	Lun *int32 `json:"lun,omitempty"`
    +}
    +
    +// DiagnosticsProfile is describes a diagnostics profile.
    +type DiagnosticsProfile struct {
    +	BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"`
    +}
    +
+// DiskEncryptionSettings is describes an Encryption Settings for a Disk
    +type DiskEncryptionSettings struct {
    +	DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"`
    +	KeyEncryptionKey  *KeyVaultKeyReference    `json:"keyEncryptionKey,omitempty"`
    +	Enabled           *bool                    `json:"enabled,omitempty"`
    +}
    +
    +// DiskInstanceView is the instance view of the disk.
    +type DiskInstanceView struct {
    +	Name     *string               `json:"name,omitempty"`
    +	Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
    +}
    +
    +// HardwareProfile is describes a hardware profile.
    +type HardwareProfile struct {
    +	VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"`
    +}
    +
    +// ImageReference is the image reference.
    +type ImageReference struct {
    +	Publisher *string `json:"publisher,omitempty"`
    +	Offer     *string `json:"offer,omitempty"`
    +	Sku       *string `json:"sku,omitempty"`
    +	Version   *string `json:"version,omitempty"`
    +}
    +
    +// InnerError is inner error details.
    +type InnerError struct {
    +	Exceptiontype *string `json:"exceptiontype,omitempty"`
    +	Errordetail   *string `json:"errordetail,omitempty"`
    +}
    +
    +// InstanceViewStatus is instance view status.
    +type InstanceViewStatus struct {
    +	Code          *string          `json:"code,omitempty"`
    +	Level         StatusLevelTypes `json:"level,omitempty"`
    +	DisplayStatus *string          `json:"displayStatus,omitempty"`
    +	Message       *string          `json:"message,omitempty"`
    +	Time          *date.Time       `json:"time,omitempty"`
    +}
    +
    +// KeyVaultKeyReference is describes a reference to Key Vault Key
    +type KeyVaultKeyReference struct {
    +	KeyURL      *string      `json:"keyUrl,omitempty"`
    +	SourceVault *SubResource `json:"sourceVault,omitempty"`
    +}
    +
    +// KeyVaultSecretReference is describes a reference to Key Vault Secret
    +type KeyVaultSecretReference struct {
    +	SecretURL   *string      `json:"secretUrl,omitempty"`
    +	SourceVault *SubResource `json:"sourceVault,omitempty"`
    +}
    +
+// LinuxConfiguration is describes Linux Configuration of the OS Profile.
    +type LinuxConfiguration struct {
    +	DisablePasswordAuthentication *bool             `json:"disablePasswordAuthentication,omitempty"`
    +	SSH                           *SSHConfiguration `json:"ssh,omitempty"`
    +}
    +
    +// ListUsagesResult is the List Usages operation response.
    +type ListUsagesResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]Usage `json:"value,omitempty"`
    +	NextLink          *string  `json:"nextLink,omitempty"`
    +}
    +
    +// ListUsagesResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client ListUsagesResult) ListUsagesResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
    +// ListVirtualMachineExtensionImage is
    +type ListVirtualMachineExtensionImage struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachineExtensionImage `json:"value,omitempty"`
    +}
    +
    +// ListVirtualMachineImageResource is
    +type ListVirtualMachineImageResource struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachineImageResource `json:"value,omitempty"`
    +}
    +
    +// LongRunningOperationProperties is compute-specific operation properties,
    +// including output
    +type LongRunningOperationProperties struct {
    +	Output *map[string]interface{} `json:"output,omitempty"`
    +}
    +
    +// NetworkInterfaceReference is describes a network interface reference.
    +type NetworkInterfaceReference struct {
    +	ID         *string                              `json:"id,omitempty"`
    +	Properties *NetworkInterfaceReferenceProperties `json:"properties,omitempty"`
    +}
    +
    +// NetworkInterfaceReferenceProperties is describes a network interface
    +// reference properties.
    +type NetworkInterfaceReferenceProperties struct {
    +	Primary *bool `json:"primary,omitempty"`
    +}
    +
    +// NetworkProfile is describes a network profile.
    +type NetworkProfile struct {
    +	NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"`
    +}
    +
    +// OSDisk is describes an Operating System disk.
    +type OSDisk struct {
    +	OsType             OperatingSystemTypes    `json:"osType,omitempty"`
    +	EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
    +	Name               *string                 `json:"name,omitempty"`
    +	Vhd                *VirtualHardDisk        `json:"vhd,omitempty"`
    +	Image              *VirtualHardDisk        `json:"image,omitempty"`
    +	Caching            CachingTypes            `json:"caching,omitempty"`
    +	CreateOption       DiskCreateOptionTypes   `json:"createOption,omitempty"`
    +	DiskSizeGB         *int32                  `json:"diskSizeGB,omitempty"`
    +}
    +
    +// OSDiskImage is contains the os disk image information.
    +type OSDiskImage struct {
    +	OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"`
    +}
    +
    +// OSProfile is describes an OS profile.
    +type OSProfile struct {
    +	ComputerName         *string               `json:"computerName,omitempty"`
    +	AdminUsername        *string               `json:"adminUsername,omitempty"`
    +	AdminPassword        *string               `json:"adminPassword,omitempty"`
    +	CustomData           *string               `json:"customData,omitempty"`
    +	WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
    +	LinuxConfiguration   *LinuxConfiguration   `json:"linuxConfiguration,omitempty"`
    +	Secrets              *[]VaultSecretGroup   `json:"secrets,omitempty"`
    +}
    +
    +// Plan is plan for the resource.
    +type Plan struct {
    +	Name          *string `json:"name,omitempty"`
    +	Publisher     *string `json:"publisher,omitempty"`
    +	Product       *string `json:"product,omitempty"`
    +	PromotionCode *string `json:"promotionCode,omitempty"`
    +}
    +
    +// PurchasePlan is used for establishing the purchase context of any 3rd Party
    +// artifact through MarketPlace.
    +type PurchasePlan struct {
    +	Publisher *string `json:"publisher,omitempty"`
    +	Name      *string `json:"name,omitempty"`
    +	Product   *string `json:"product,omitempty"`
    +}
    +
    +// Resource is the Resource model definition.
    +type Resource struct {
    +	ID       *string             `json:"id,omitempty"`
    +	Name     *string             `json:"name,omitempty"`
    +	Type     *string             `json:"type,omitempty"`
    +	Location *string             `json:"location,omitempty"`
    +	Tags     *map[string]*string `json:"tags,omitempty"`
    +}
    +
    +// Sku is describes a virtual machine scale set sku.
    +type Sku struct {
    +	Name     *string `json:"name,omitempty"`
    +	Tier     *string `json:"tier,omitempty"`
    +	Capacity *int64  `json:"capacity,omitempty"`
    +}
    +
+// SSHConfiguration is SSH configuration for Linux based VMs running on Azure
    +type SSHConfiguration struct {
    +	PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"`
    +}
    +
    +// SSHPublicKey is contains information about SSH certificate public key and
    +// the path on the Linux VM where the public key is placed.
    +type SSHPublicKey struct {
    +	Path    *string `json:"path,omitempty"`
    +	KeyData *string `json:"keyData,omitempty"`
    +}
    +
    +// StorageProfile is describes a storage profile.
    +type StorageProfile struct {
    +	ImageReference *ImageReference `json:"imageReference,omitempty"`
    +	OsDisk         *OSDisk         `json:"osDisk,omitempty"`
    +	DataDisks      *[]DataDisk     `json:"dataDisks,omitempty"`
    +}
    +
    +// SubResource is
    +type SubResource struct {
    +	ID *string `json:"id,omitempty"`
    +}
    +
+// UpgradePolicy describes an upgrade policy - automatic or manual.
    +type UpgradePolicy struct {
    +	Mode UpgradeMode `json:"mode,omitempty"`
    +}
    +
+// Usage describes Compute Resource Usage.
    +type Usage struct {
    +	Unit         *string    `json:"unit,omitempty"`
    +	CurrentValue *int32     `json:"currentValue,omitempty"`
    +	Limit        *int64     `json:"limit,omitempty"`
    +	Name         *UsageName `json:"name,omitempty"`
    +}
    +
+// UsageName is the usage metric name, including its localized value.
    +type UsageName struct {
    +	Value          *string `json:"value,omitempty"`
    +	LocalizedValue *string `json:"localizedValue,omitempty"`
    +}
    +
+// VaultCertificate describes a single certificate reference in a Key
    +// Vault, and where the certificate should reside on the VM.
    +type VaultCertificate struct {
    +	CertificateURL   *string `json:"certificateUrl,omitempty"`
    +	CertificateStore *string `json:"certificateStore,omitempty"`
    +}
    +
+// VaultSecretGroup describes a set of certificates which are all in the
    +// same Key Vault.
    +type VaultSecretGroup struct {
    +	SourceVault       *SubResource        `json:"sourceVault,omitempty"`
    +	VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"`
    +}
    +
+// VirtualHardDisk describes the URI of a disk.
    +type VirtualHardDisk struct {
    +	URI *string `json:"uri,omitempty"`
    +}
    +
+// VirtualMachine describes a Virtual Machine.
    +type VirtualMachine struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                    `json:"id,omitempty"`
    +	Name              *string                    `json:"name,omitempty"`
    +	Type              *string                    `json:"type,omitempty"`
    +	Location          *string                    `json:"location,omitempty"`
    +	Tags              *map[string]*string        `json:"tags,omitempty"`
    +	Plan              *Plan                      `json:"plan,omitempty"`
    +	Properties        *VirtualMachineProperties  `json:"properties,omitempty"`
    +	Resources         *[]VirtualMachineExtension `json:"resources,omitempty"`
    +}
    +
    +// VirtualMachineAgentInstanceView is the instance view of the VM Agent
    +// running on the virtual machine.
    +type VirtualMachineAgentInstanceView struct {
    +	VMAgentVersion    *string                                       `json:"vmAgentVersion,omitempty"`
    +	ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"`
    +	Statuses          *[]InstanceViewStatus                         `json:"statuses,omitempty"`
    +}
    +
+// VirtualMachineCaptureParameters contains capture Virtual Machine parameters.
    +type VirtualMachineCaptureParameters struct {
    +	VhdPrefix                *string `json:"vhdPrefix,omitempty"`
    +	DestinationContainerName *string `json:"destinationContainerName,omitempty"`
    +	OverwriteVhds            *bool   `json:"overwriteVhds,omitempty"`
    +}
    +
+// VirtualMachineCaptureResult contains the resource ID and capture output.
    +type VirtualMachineCaptureResult struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                                `json:"id,omitempty"`
    +	Properties        *VirtualMachineCaptureResultProperties `json:"properties,omitempty"`
    +}
    +
+// VirtualMachineCaptureResultProperties contains compute-specific operation
    +// properties, including output
    +type VirtualMachineCaptureResultProperties struct {
    +	Output *map[string]interface{} `json:"output,omitempty"`
    +}
    +
+// VirtualMachineExtension describes a Virtual Machine Extension.
    +type VirtualMachineExtension struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                            `json:"id,omitempty"`
    +	Name              *string                            `json:"name,omitempty"`
    +	Type              *string                            `json:"type,omitempty"`
    +	Location          *string                            `json:"location,omitempty"`
    +	Tags              *map[string]*string                `json:"tags,omitempty"`
    +	Properties        *VirtualMachineExtensionProperties `json:"properties,omitempty"`
    +}
    +
    +// VirtualMachineExtensionHandlerInstanceView is the instance view of a
    +// virtual machine extension handler.
    +type VirtualMachineExtensionHandlerInstanceView struct {
    +	Type               *string             `json:"type,omitempty"`
    +	TypeHandlerVersion *string             `json:"typeHandlerVersion,omitempty"`
    +	Status             *InstanceViewStatus `json:"status,omitempty"`
    +}
    +
+// VirtualMachineExtensionImage describes a Virtual Machine Extension Image.
    +type VirtualMachineExtensionImage struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                                 `json:"id,omitempty"`
    +	Name              *string                                 `json:"name,omitempty"`
    +	Type              *string                                 `json:"type,omitempty"`
    +	Location          *string                                 `json:"location,omitempty"`
    +	Tags              *map[string]*string                     `json:"tags,omitempty"`
    +	Properties        *VirtualMachineExtensionImageProperties `json:"properties,omitempty"`
    +}
    +
+// VirtualMachineExtensionImageProperties describes the properties of a
    +// Virtual Machine Extension Image.
    +type VirtualMachineExtensionImageProperties struct {
    +	OperatingSystem            *string `json:"operatingSystem,omitempty"`
    +	ComputeRole                *string `json:"computeRole,omitempty"`
    +	HandlerSchema              *string `json:"handlerSchema,omitempty"`
    +	VMScaleSetEnabled          *bool   `json:"vmScaleSetEnabled,omitempty"`
    +	SupportsMultipleExtensions *bool   `json:"supportsMultipleExtensions,omitempty"`
    +}
    +
    +// VirtualMachineExtensionInstanceView is the instance view of a virtual
    +// machine extension.
    +type VirtualMachineExtensionInstanceView struct {
    +	Name               *string               `json:"name,omitempty"`
    +	Type               *string               `json:"type,omitempty"`
    +	TypeHandlerVersion *string               `json:"typeHandlerVersion,omitempty"`
    +	Substatuses        *[]InstanceViewStatus `json:"substatuses,omitempty"`
    +	Statuses           *[]InstanceViewStatus `json:"statuses,omitempty"`
    +}
    +
+// VirtualMachineExtensionProperties describes the properties of a Virtual
    +// Machine Extension.
    +type VirtualMachineExtensionProperties struct {
    +	ForceUpdateTag          *string                              `json:"forceUpdateTag,omitempty"`
    +	Publisher               *string                              `json:"publisher,omitempty"`
    +	Type                    *string                              `json:"type,omitempty"`
    +	TypeHandlerVersion      *string                              `json:"typeHandlerVersion,omitempty"`
    +	AutoUpgradeMinorVersion *bool                                `json:"autoUpgradeMinorVersion,omitempty"`
    +	Settings                *map[string]interface{}              `json:"settings,omitempty"`
    +	ProtectedSettings       *map[string]interface{}              `json:"protectedSettings,omitempty"`
    +	ProvisioningState       *string                              `json:"provisioningState,omitempty"`
    +	InstanceView            *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"`
    +}
    +
+// VirtualMachineImage describes a Virtual Machine Image.
    +type VirtualMachineImage struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                        `json:"id,omitempty"`
    +	Name              *string                        `json:"name,omitempty"`
    +	Location          *string                        `json:"location,omitempty"`
    +	Tags              *map[string]*string            `json:"tags,omitempty"`
    +	Properties        *VirtualMachineImageProperties `json:"properties,omitempty"`
    +}
    +
+// VirtualMachineImageProperties describes the properties of a Virtual
    +// Machine Image.
    +type VirtualMachineImageProperties struct {
    +	Plan           *PurchasePlan    `json:"plan,omitempty"`
    +	OsDiskImage    *OSDiskImage     `json:"osDiskImage,omitempty"`
    +	DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"`
    +}
    +
    +// VirtualMachineImageResource is virtual machine image resource information.
    +type VirtualMachineImageResource struct {
    +	ID       *string             `json:"id,omitempty"`
    +	Name     *string             `json:"name,omitempty"`
    +	Location *string             `json:"location,omitempty"`
    +	Tags     *map[string]*string `json:"tags,omitempty"`
    +}
    +
    +// VirtualMachineInstanceView is the instance view of a virtual machine.
    +type VirtualMachineInstanceView struct {
    +	PlatformUpdateDomain *int32                                 `json:"platformUpdateDomain,omitempty"`
    +	PlatformFaultDomain  *int32                                 `json:"platformFaultDomain,omitempty"`
    +	RdpThumbPrint        *string                                `json:"rdpThumbPrint,omitempty"`
    +	VMAgent              *VirtualMachineAgentInstanceView       `json:"vmAgent,omitempty"`
    +	Disks                *[]DiskInstanceView                    `json:"disks,omitempty"`
    +	Extensions           *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"`
    +	BootDiagnostics      *BootDiagnosticsInstanceView           `json:"bootDiagnostics,omitempty"`
    +	Statuses             *[]InstanceViewStatus                  `json:"statuses,omitempty"`
    +}
    +
    +// VirtualMachineListResult is the List Virtual Machine operation response.
    +type VirtualMachineListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachine `json:"value,omitempty"`
    +	NextLink          *string           `json:"nextLink,omitempty"`
    +}
    +
    +// VirtualMachineListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client VirtualMachineListResult) VirtualMachineListResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
+// VirtualMachineProperties describes the properties of a Virtual Machine.
    +type VirtualMachineProperties struct {
    +	HardwareProfile    *HardwareProfile            `json:"hardwareProfile,omitempty"`
    +	StorageProfile     *StorageProfile             `json:"storageProfile,omitempty"`
    +	OsProfile          *OSProfile                  `json:"osProfile,omitempty"`
    +	NetworkProfile     *NetworkProfile             `json:"networkProfile,omitempty"`
    +	DiagnosticsProfile *DiagnosticsProfile         `json:"diagnosticsProfile,omitempty"`
    +	AvailabilitySet    *SubResource                `json:"availabilitySet,omitempty"`
    +	ProvisioningState  *string                     `json:"provisioningState,omitempty"`
    +	InstanceView       *VirtualMachineInstanceView `json:"instanceView,omitempty"`
    +	LicenseType        *string                     `json:"licenseType,omitempty"`
    +	VMID               *string                     `json:"vmId,omitempty"`
    +}
    +
+// VirtualMachineScaleSet describes a Virtual Machine Scale Set.
    +type VirtualMachineScaleSet struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                           `json:"id,omitempty"`
    +	Name              *string                           `json:"name,omitempty"`
    +	Type              *string                           `json:"type,omitempty"`
    +	Location          *string                           `json:"location,omitempty"`
    +	Tags              *map[string]*string               `json:"tags,omitempty"`
    +	Sku               *Sku                              `json:"sku,omitempty"`
    +	Properties        *VirtualMachineScaleSetProperties `json:"properties,omitempty"`
    +}
    +
+// VirtualMachineScaleSetExtension describes a Virtual Machine Scale Set
    +// Extension.
    +type VirtualMachineScaleSetExtension struct {
    +	ID         *string                                    `json:"id,omitempty"`
    +	Name       *string                                    `json:"name,omitempty"`
    +	Properties *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"`
    +}
    +
+// VirtualMachineScaleSetExtensionProfile describes a virtual machine scale
    +// set extension profile.
    +type VirtualMachineScaleSetExtensionProfile struct {
    +	Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"`
    +}
    +
+// VirtualMachineScaleSetExtensionProperties describes the properties of a
    +// Virtual Machine Scale Set Extension.
    +type VirtualMachineScaleSetExtensionProperties struct {
    +	Publisher               *string                 `json:"publisher,omitempty"`
    +	Type                    *string                 `json:"type,omitempty"`
    +	TypeHandlerVersion      *string                 `json:"typeHandlerVersion,omitempty"`
    +	AutoUpgradeMinorVersion *bool                   `json:"autoUpgradeMinorVersion,omitempty"`
    +	Settings                *map[string]interface{} `json:"settings,omitempty"`
    +	ProtectedSettings       *map[string]interface{} `json:"protectedSettings,omitempty"`
    +	ProvisioningState       *string                 `json:"provisioningState,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetInstanceView is the instance view of a virtual
    +// machine scale set.
    +type VirtualMachineScaleSetInstanceView struct {
    +	autorest.Response `json:"-"`
    +	VirtualMachine    *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"`
    +	Extensions        *[]VirtualMachineScaleSetVMExtensionsSummary       `json:"extensions,omitempty"`
    +	Statuses          *[]InstanceViewStatus                              `json:"statuses,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetInstanceViewStatusesSummary is instance view statuses
    +// summary for virtual machines of a virtual machine scale set.
    +type VirtualMachineScaleSetInstanceViewStatusesSummary struct {
    +	StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"`
    +}
    +
+// VirtualMachineScaleSetIPConfiguration describes a virtual machine scale
    +// set network profile's IP configuration.
    +type VirtualMachineScaleSetIPConfiguration struct {
    +	ID         *string                                          `json:"id,omitempty"`
    +	Name       *string                                          `json:"name,omitempty"`
    +	Properties *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"`
    +}
    +
+// VirtualMachineScaleSetIPConfigurationProperties describes a virtual
    +// machine scale set network profile's IP configuration properties.
    +type VirtualMachineScaleSetIPConfigurationProperties struct {
    +	Subnet                                *APIEntityReference `json:"subnet,omitempty"`
    +	ApplicationGatewayBackendAddressPools *[]SubResource      `json:"applicationGatewayBackendAddressPools,omitempty"`
    +	LoadBalancerBackendAddressPools       *[]SubResource      `json:"loadBalancerBackendAddressPools,omitempty"`
    +	LoadBalancerInboundNatPools           *[]SubResource      `json:"loadBalancerInboundNatPools,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetListResult is the List Virtual Machine operation
    +// response.
    +type VirtualMachineScaleSetListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachineScaleSet `json:"value,omitempty"`
    +	NextLink          *string                   `json:"nextLink,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client VirtualMachineScaleSetListResult) VirtualMachineScaleSetListResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
    +// VirtualMachineScaleSetListSkusResult is the Virtual Machine Scale Set List
    +// Skus operation response.
    +type VirtualMachineScaleSetListSkusResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachineScaleSetSku `json:"value,omitempty"`
    +	NextLink          *string                      `json:"nextLink,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client VirtualMachineScaleSetListSkusResult) VirtualMachineScaleSetListSkusResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
    +// VirtualMachineScaleSetListWithLinkResult is the List Virtual Machine
    +// operation response.
    +type VirtualMachineScaleSetListWithLinkResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachineScaleSet `json:"value,omitempty"`
    +	NextLink          *string                   `json:"nextLink,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetListWithLinkResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client VirtualMachineScaleSetListWithLinkResult) VirtualMachineScaleSetListWithLinkResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
+// VirtualMachineScaleSetNetworkConfiguration describes a virtual machine
    +// scale set network profile's network configurations.
    +type VirtualMachineScaleSetNetworkConfiguration struct {
    +	ID         *string                                               `json:"id,omitempty"`
    +	Name       *string                                               `json:"name,omitempty"`
    +	Properties *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"`
    +}
    +
+// VirtualMachineScaleSetNetworkConfigurationProperties describes a virtual
    +// machine scale set network profile's IP configuration.
    +type VirtualMachineScaleSetNetworkConfigurationProperties struct {
    +	Primary          *bool                                    `json:"primary,omitempty"`
    +	IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"`
    +}
    +
+// VirtualMachineScaleSetNetworkProfile describes a virtual machine scale
    +// set network profile.
    +type VirtualMachineScaleSetNetworkProfile struct {
    +	NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"`
    +}
    +
+// VirtualMachineScaleSetOSDisk describes a virtual machine scale set
    +// operating system disk.
    +type VirtualMachineScaleSetOSDisk struct {
    +	Name          *string               `json:"name,omitempty"`
    +	Caching       CachingTypes          `json:"caching,omitempty"`
    +	CreateOption  DiskCreateOptionTypes `json:"createOption,omitempty"`
    +	OsType        OperatingSystemTypes  `json:"osType,omitempty"`
    +	Image         *VirtualHardDisk      `json:"image,omitempty"`
    +	VhdContainers *[]string             `json:"vhdContainers,omitempty"`
    +}
    +
+// VirtualMachineScaleSetOSProfile describes a virtual machine scale set OS
    +// profile.
    +type VirtualMachineScaleSetOSProfile struct {
    +	ComputerNamePrefix   *string               `json:"computerNamePrefix,omitempty"`
    +	AdminUsername        *string               `json:"adminUsername,omitempty"`
    +	AdminPassword        *string               `json:"adminPassword,omitempty"`
    +	CustomData           *string               `json:"customData,omitempty"`
    +	WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
    +	LinuxConfiguration   *LinuxConfiguration   `json:"linuxConfiguration,omitempty"`
    +	Secrets              *[]VaultSecretGroup   `json:"secrets,omitempty"`
    +}
    +
+// VirtualMachineScaleSetProperties describes the properties of a Virtual
    +// Machine Scale Set.
    +type VirtualMachineScaleSetProperties struct {
    +	UpgradePolicy         *UpgradePolicy                   `json:"upgradePolicy,omitempty"`
    +	VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"`
    +	ProvisioningState     *string                          `json:"provisioningState,omitempty"`
    +	Overprovision         *bool                            `json:"overprovision,omitempty"`
    +}
    +
+// VirtualMachineScaleSetSku describes an available virtual machine scale
    +// set sku.
    +type VirtualMachineScaleSetSku struct {
    +	ResourceType *string                            `json:"resourceType,omitempty"`
    +	Sku          *Sku                               `json:"sku,omitempty"`
    +	Capacity     *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"`
    +}
    +
+// VirtualMachineScaleSetSkuCapacity describes scaling information of a sku.
    +type VirtualMachineScaleSetSkuCapacity struct {
    +	Minimum         *int64                             `json:"minimum,omitempty"`
    +	Maximum         *int64                             `json:"maximum,omitempty"`
    +	DefaultCapacity *int64                             `json:"defaultCapacity,omitempty"`
    +	ScaleType       VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"`
    +}
    +
+// VirtualMachineScaleSetStorageProfile describes a virtual machine scale
    +// set storage profile.
    +type VirtualMachineScaleSetStorageProfile struct {
    +	ImageReference *ImageReference               `json:"imageReference,omitempty"`
    +	OsDisk         *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"`
    +}
    +
+// VirtualMachineScaleSetVM describes a virtual machine scale set virtual
    +// machine.
    +type VirtualMachineScaleSetVM struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                             `json:"id,omitempty"`
    +	Name              *string                             `json:"name,omitempty"`
    +	Type              *string                             `json:"type,omitempty"`
    +	Location          *string                             `json:"location,omitempty"`
    +	Tags              *map[string]*string                 `json:"tags,omitempty"`
    +	InstanceID        *string                             `json:"instanceId,omitempty"`
    +	Sku               *Sku                                `json:"sku,omitempty"`
    +	Properties        *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"`
    +	Plan              *Plan                               `json:"plan,omitempty"`
    +	Resources         *[]VirtualMachineExtension          `json:"resources,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetVMExtensionsSummary is extensions summary for virtual
    +// machines of a virtual machine scale set.
    +type VirtualMachineScaleSetVMExtensionsSummary struct {
    +	Name            *string                          `json:"name,omitempty"`
    +	StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetVMInstanceIDs is specifies the list of virtual
    +// machine scale set instance IDs.
    +type VirtualMachineScaleSetVMInstanceIDs struct {
    +	InstanceIds *[]string `json:"instanceIds,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetVMInstanceRequiredIDs is specifies the list of
    +// virtual machine scale set instance IDs.
    +type VirtualMachineScaleSetVMInstanceRequiredIDs struct {
    +	InstanceIds *[]string `json:"instanceIds,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetVMInstanceView is the instance view of a virtual
    +// machine scale set VM.
    +type VirtualMachineScaleSetVMInstanceView struct {
    +	autorest.Response    `json:"-"`
    +	PlatformUpdateDomain *int32                                 `json:"platformUpdateDomain,omitempty"`
    +	PlatformFaultDomain  *int32                                 `json:"platformFaultDomain,omitempty"`
    +	RdpThumbPrint        *string                                `json:"rdpThumbPrint,omitempty"`
    +	VMAgent              *VirtualMachineAgentInstanceView       `json:"vmAgent,omitempty"`
    +	Disks                *[]DiskInstanceView                    `json:"disks,omitempty"`
    +	Extensions           *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"`
    +	BootDiagnostics      *BootDiagnosticsInstanceView           `json:"bootDiagnostics,omitempty"`
    +	Statuses             *[]InstanceViewStatus                  `json:"statuses,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetVMListResult is the List Virtual Machine Scale Set
    +// VMs operation response.
    +type VirtualMachineScaleSetVMListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachineScaleSetVM `json:"value,omitempty"`
    +	NextLink          *string                     `json:"nextLink,omitempty"`
    +}
    +
    +// VirtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client VirtualMachineScaleSetVMListResult) VirtualMachineScaleSetVMListResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
+// VirtualMachineScaleSetVMProfile describes a virtual machine scale set
    +// virtual machine profile.
    +type VirtualMachineScaleSetVMProfile struct {
    +	OsProfile        *VirtualMachineScaleSetOSProfile        `json:"osProfile,omitempty"`
    +	StorageProfile   *VirtualMachineScaleSetStorageProfile   `json:"storageProfile,omitempty"`
    +	NetworkProfile   *VirtualMachineScaleSetNetworkProfile   `json:"networkProfile,omitempty"`
    +	ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"`
    +}
    +
+// VirtualMachineScaleSetVMProperties describes the properties of a virtual
    +// machine scale set virtual machine.
    +type VirtualMachineScaleSetVMProperties struct {
    +	LatestModelApplied *bool                       `json:"latestModelApplied,omitempty"`
    +	InstanceView       *VirtualMachineInstanceView `json:"instanceView,omitempty"`
    +	HardwareProfile    *HardwareProfile            `json:"hardwareProfile,omitempty"`
    +	StorageProfile     *StorageProfile             `json:"storageProfile,omitempty"`
    +	OsProfile          *OSProfile                  `json:"osProfile,omitempty"`
    +	NetworkProfile     *NetworkProfile             `json:"networkProfile,omitempty"`
    +	DiagnosticsProfile *DiagnosticsProfile         `json:"diagnosticsProfile,omitempty"`
    +	AvailabilitySet    *SubResource                `json:"availabilitySet,omitempty"`
    +	ProvisioningState  *string                     `json:"provisioningState,omitempty"`
    +	LicenseType        *string                     `json:"licenseType,omitempty"`
    +}
    +
+// VirtualMachineSize describes the properties of a VM size.
    +type VirtualMachineSize struct {
    +	Name                 *string `json:"name,omitempty"`
    +	NumberOfCores        *int32  `json:"numberOfCores,omitempty"`
    +	OsDiskSizeInMB       *int32  `json:"osDiskSizeInMB,omitempty"`
    +	ResourceDiskSizeInMB *int32  `json:"resourceDiskSizeInMB,omitempty"`
    +	MemoryInMB           *int32  `json:"memoryInMB,omitempty"`
    +	MaxDataDiskCount     *int32  `json:"maxDataDiskCount,omitempty"`
    +}
    +
    +// VirtualMachineSizeListResult is the List Virtual Machine operation response.
    +type VirtualMachineSizeListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]VirtualMachineSize `json:"value,omitempty"`
    +}
    +
    +// VirtualMachineStatusCodeCount is the status code and count of the virtual
    +// machine scale set instance view status summary.
    +type VirtualMachineStatusCodeCount struct {
    +	Code  *string `json:"code,omitempty"`
    +	Count *int32  `json:"count,omitempty"`
    +}
    +
+// WindowsConfiguration describes Windows Configuration of the OS Profile.
    +type WindowsConfiguration struct {
    +	ProvisionVMAgent          *bool                        `json:"provisionVMAgent,omitempty"`
    +	EnableAutomaticUpdates    *bool                        `json:"enableAutomaticUpdates,omitempty"`
    +	TimeZone                  *string                      `json:"timeZone,omitempty"`
    +	AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"`
    +	WinRM                     *WinRMConfiguration          `json:"winRM,omitempty"`
    +}
    +
+// WinRMConfiguration describes Windows Remote Management configuration of
    +// the VM
    +type WinRMConfiguration struct {
    +	Listeners *[]WinRMListener `json:"listeners,omitempty"`
    +}
    +
+// WinRMListener describes Protocol and thumbprint of Windows Remote
    +// Management listener
    +type WinRMListener struct {
    +	Protocol       ProtocolTypes `json:"protocol,omitempty"`
    +	CertificateURL *string       `json:"certificateUrl,omitempty"`
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go
    new file mode 100644
    index 0000000..474c7ca
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go
    @@ -0,0 +1,134 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// UsageOperationsClient is the Compute Management Client.
    +type UsageOperationsClient struct {
    +	ManagementClient
    +}
    +
    +// NewUsageOperationsClient creates an instance of the UsageOperationsClient
    +// client.
    +func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient {
    +	return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewUsageOperationsClientWithBaseURI creates an instance of the
    +// UsageOperationsClient client.
    +func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient {
    +	return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// List lists compute usages for a subscription.
    +//
    +// location is the location upon which resource usage is queried.
    +func (client UsageOperationsClient) List(location string) (result ListUsagesResult, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: location,
    +			Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "compute.UsageOperationsClient", "List")
    +	}
    +
    +	req, err := client.ListPreparer(location)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client UsageOperationsClient) ListPreparer(location string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client UsageOperationsClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client UsageOperationsClient) ListNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) {
    +	req, err := lastResults.ListUsagesResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go
    new file mode 100644
    index 0000000..c66fb8c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go
    @@ -0,0 +1,43 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"fmt"
    +)
    +
    +const (
    +	major = "6"
    +	minor = "0"
    +	patch = "0"
    +	// Always begin a "tag" with a dash (as per http://semver.org)
    +	tag             = "-beta"
    +	semVerFormat    = "%s.%s.%s%s"
    +	userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s"
    +)
    +
    +// UserAgent returns the UserAgent string to use when sending http.Requests.
    +func UserAgent() string {
    +	return fmt.Sprintf(userAgentFormat, Version(), "compute", "2016-03-30")
    +}
    +
    +// Version returns the semantic version (see http://semver.org) of the client.
    +func Version() string {
    +	return fmt.Sprintf(semVerFormat, major, minor, patch, tag)
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go
    new file mode 100644
    index 0000000..089ebe1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go
    @@ -0,0 +1,238 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// VirtualMachineExtensionImagesClient is the Compute Management Client.
    +type VirtualMachineExtensionImagesClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualMachineExtensionImagesClient creates an instance of the
    +// VirtualMachineExtensionImagesClient client.
    +func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient {
    +	return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of
    +// the VirtualMachineExtensionImagesClient client.
    +func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient {
    +	return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// Get gets a virtual machine extension image.
    +//
    +func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) {
    +	req, err := client.GetPreparer(location, publisherName, typeParameter, version)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, publisherName string, typeParameter string, version string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"publisherName":  autorest.Encode("path", publisherName),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +		"type":           autorest.Encode("path", typeParameter),
    +		"version":        autorest.Encode("path", version),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Response) (result VirtualMachineExtensionImage, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListTypes gets a list of virtual machine extension image types.
    +//
    +func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) {
    +	req, err := client.ListTypesPreparer(location, publisherName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListTypesSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListTypesResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListTypesPreparer prepares the ListTypes request.
    +func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location string, publisherName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"publisherName":  autorest.Encode("path", publisherName),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListTypesSender sends the ListTypes request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListTypesResponder handles the response to the ListTypes request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result.Value),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListVersions gets a list of virtual machine extension image versions.
    +//
    +// filter is the filter to apply on the operation.
    +func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) {
    +	req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderby)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListVersionsSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListVersionsResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListVersionsPreparer prepares the ListVersions request.
    +func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"publisherName":  autorest.Encode("path", publisherName),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +		"type":           autorest.Encode("path", typeParameter),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(filter) > 0 {
    +		queryParameters["$filter"] = autorest.Encode("query", filter)
    +	}
    +	if top != nil {
    +		queryParameters["$top"] = autorest.Encode("query", *top)
    +	}
    +	if len(orderby) > 0 {
    +		queryParameters["$orderby"] = autorest.Encode("query", orderby)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListVersionsSender sends the ListVersions request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListVersionsResponder handles the response to the ListVersions request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result.Value),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go
    new file mode 100644
    index 0000000..dbbce54
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go
    @@ -0,0 +1,261 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// VirtualMachineExtensionsClient is the Compute Management Client.
    +type VirtualMachineExtensionsClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualMachineExtensionsClient creates an instance of the
    +// VirtualMachineExtensionsClient client.
    +func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient {
    +	return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the
    +// VirtualMachineExtensionsClient client.
    +func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient {
    +	return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the operation to create or update the extension. This method
    +// may poll for completion. Polling can be canceled by passing the cancel
    +// channel argument. The channel will be used to cancel polling and any
    +// outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmName is the name of
    +// the virtual machine where the extension should be create or updated.
    +// vmExtensionName is the name of the virtual machine extension.
    +// extensionParameters is parameters supplied to the Create Virtual Machine
    +// Extension operation.
    +func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: extensionParameters,
    +			Constraints: []validation.Constraint{{Target: "extensionParameters.Properties", Name: validation.Null, Rule: false,
    +				Chain: []validation.Constraint{{Target: "extensionParameters.Properties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate")
    +	}
    +
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, vmExtensionName, extensionParameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmExtensionName":   autorest.Encode("path", vmExtensionName),
    +		"vmName":            autorest.Encode("path", vmName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
    +		autorest.WithJSON(extensionParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the operation to delete the extension. This method may poll for
    +// completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmName is the name of
    +// the virtual machine where the extension should be deleted. vmExtensionName
    +// is the name of the virtual machine extension.
    +func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, vmName, vmExtensionName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmExtensionName":   autorest.Encode("path", vmExtensionName),
    +		"vmName":            autorest.Encode("path", vmName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the operation to get the extension.
    +//
    +// resourceGroupName is the name of the resource group. vmName is the name of
    +// the virtual machine containing the extension. vmExtensionName is the name
    +// of the virtual machine extension. expand is the expand expression to apply
    +// on the operation.
    +func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmName string, vmExtensionName string, expand string) (result VirtualMachineExtension, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, vmName, vmExtensionName, expand)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName string, vmName string, vmExtensionName string, expand string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmExtensionName":   autorest.Encode("path", vmExtensionName),
    +		"vmName":            autorest.Encode("path", vmName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(expand) > 0 {
    +		queryParameters["$expand"] = autorest.Encode("query", expand)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go
    new file mode 100644
    index 0000000..50d9614
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go
    @@ -0,0 +1,362 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// VirtualMachineImagesClient is the Compute Management Client.
    +type VirtualMachineImagesClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualMachineImagesClient creates an instance of the
    +// VirtualMachineImagesClient client.
    +func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient {
    +	return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the
    +// VirtualMachineImagesClient client.
    +func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient {
    +	return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// Get gets a virtual machine image.
    +//
    +func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) {
    +	req, err := client.GetPreparer(location, publisherName, offer, skus, version)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client VirtualMachineImagesClient) GetPreparer(location string, publisherName string, offer string, skus string, version string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"offer":          autorest.Encode("path", offer),
    +		"publisherName":  autorest.Encode("path", publisherName),
    +		"skus":           autorest.Encode("path", skus),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +		"version":        autorest.Encode("path", version),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List gets a list of virtual machine images.
    +//
    +// filter is the filter to apply on the operation.
    +func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) {
    +	req, err := client.ListPreparer(location, publisherName, offer, skus, filter, top, orderby)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client VirtualMachineImagesClient) ListPreparer(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"offer":          autorest.Encode("path", offer),
    +		"publisherName":  autorest.Encode("path", publisherName),
    +		"skus":           autorest.Encode("path", skus),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(filter) > 0 {
    +		queryParameters["$filter"] = autorest.Encode("query", filter)
    +	}
    +	if top != nil {
    +		queryParameters["$top"] = autorest.Encode("query", *top)
    +	}
    +	if len(orderby) > 0 {
    +		queryParameters["$orderby"] = autorest.Encode("query", orderby)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result.Value),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListOffers gets a list of virtual machine image offers.
    +//
    +func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result ListVirtualMachineImageResource, err error) {
    +	req, err := client.ListOffersPreparer(location, publisherName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListOffersSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListOffersResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListOffersPreparer prepares the ListOffers request.
    +func (client VirtualMachineImagesClient) ListOffersPreparer(location string, publisherName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"publisherName":  autorest.Encode("path", publisherName),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListOffersSender sends the ListOffers request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListOffersResponder handles the response to the ListOffers request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result.Value),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListPublishers gets a list of virtual machine image publishers.
    +//
    +func (client VirtualMachineImagesClient) ListPublishers(location string) (result ListVirtualMachineImageResource, err error) {
    +	req, err := client.ListPublishersPreparer(location)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListPublishersSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListPublishersResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPublishersPreparer prepares the ListPublishers request.
    +func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListPublishersSender sends the ListPublishers request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListPublishersResponder handles the response to the ListPublishers request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result.Value),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListSkus gets a list of virtual machine image skus.
    +//
    +func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) {
    +	req, err := client.ListSkusPreparer(location, publisherName, offer)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSkusSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListSkusResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListSkusPreparer prepares the ListSkus request.
    +func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publisherName string, offer string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"offer":          autorest.Encode("path", offer),
    +		"publisherName":  autorest.Encode("path", publisherName),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSkusSender sends the ListSkus request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListSkusResponder handles the response to the ListSkus request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result.Value),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go
    new file mode 100644
    index 0000000..8b044e7
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go
    @@ -0,0 +1,984 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// VirtualMachinesClient is the Compute Management Client.
    +type VirtualMachinesClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient
    +// client.
    +func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient {
    +	return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualMachinesClientWithBaseURI creates an instance of the
    +// VirtualMachinesClient client.
    +func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient {
    +	return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// Capture captures the VM by copying virtual hard disks of the VM and outputs
    +// a template that can be used to create similar VMs. This method may poll
    +// for completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmName is the name of
    +// the virtual machine. parameters is parameters supplied to the Capture
    +// Virtual Machine operation.
    +func (client VirtualMachinesClient) Capture(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: parameters,
    +			Constraints: []validation.Constraint{{Target: "parameters.VhdPrefix", Name: validation.Null, Rule: true, Chain: nil},
    +				{Target: "parameters.DestinationContainerName", Name: validation.Null, Rule: true, Chain: nil},
    +				{Target: "parameters.OverwriteVhds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "Capture")
    +	}
    +
    +	req, err := client.CapturePreparer(resourceGroupName, vmName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CaptureSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CaptureResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CapturePreparer prepares the Capture request.
    +func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmName":            autorest.Encode("path", vmName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CaptureSender sends the Capture request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CaptureResponder handles the response to the Capture request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// CreateOrUpdate the operation to create or update a virtual machine. This
    +// method may poll for completion. Polling can be canceled by passing the
    +// cancel channel argument. The channel will be used to cancel polling and
    +// any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmName is the name of
    +// the virtual machine. parameters is parameters supplied to the Create
    +// Virtual Machine operation.
    +func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: parameters,
    +			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
    +				Chain: []validation.Constraint{{Target: "parameters.Properties.StorageProfile", Name: validation.Null, Rule: false,
    +					Chain: []validation.Constraint{{Target: "parameters.Properties.StorageProfile.OsDisk", Name: validation.Null, Rule: false,
    +						Chain: []validation.Constraint{{Target: "parameters.Properties.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false,
    +							Chain: []validation.Constraint{{Target: "parameters.Properties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
    +								Chain: []validation.Constraint{{Target: "parameters.Properties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
    +									{Target: "parameters.Properties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
    +								}},
    +								{Target: "parameters.Properties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
    +									Chain: []validation.Constraint{{Target: "parameters.Properties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
    +										{Target: "parameters.Properties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
    +									}},
    +							}},
    +							{Target: "parameters.Properties.StorageProfile.OsDisk.Name", Name: validation.Null, Rule: true, Chain: nil},
    +							{Target: "parameters.Properties.StorageProfile.OsDisk.Vhd", Name: validation.Null, Rule: true, Chain: nil},
    +						}},
    +					}},
    +					{Target: "parameters.Properties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil},
    +					{Target: "parameters.Properties.InstanceView", Name: validation.ReadOnly, Rule: true, Chain: nil},
    +					{Target: "parameters.Properties.VMID", Name: validation.ReadOnly, Rule: true, Chain: nil},
    +				}},
    +				{Target: "parameters.Resources", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "CreateOrUpdate")
    +	}
    +
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmName":            autorest.Encode("path", vmName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Deallocate shuts down the Virtual Machine and releases the compute
    +// resources. You are not billed for the compute resources that this Virtual
    +// Machine uses. This method may poll for completion. Polling can be canceled
    +// by passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmName is the name of
    +// the virtual machine.
+func (client VirtualMachinesClient) Deallocate(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeallocatePreparer(resourceGroupName, vmName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeallocateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure sending request")
+	}
+
+	// On Responder failure the error is wrapped with request context, but the
+	// result (carrying the HTTP response) is still returned to the caller.
+	result, err = client.DeallocateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeallocatePreparer prepares the Deallocate request.
+func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// NOTE(review): http.Request.Cancel is deprecated in modern Go in favor of
+	// request contexts; this generated code targets the older cancellation API.
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeallocateSender sends the Deallocate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.Response, error) {
+	// Polls the long-running operation, waiting client.PollingDelay between attempts.
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeallocateResponder handles the response to the Deallocate request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// 200 (OK) and 202 (Accepted) are the only success statuses.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Delete the operation to delete a virtual machine. This method may poll for
+// completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) Delete(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, vmName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure sending request")
+	}
+
+	// On Responder failure the error is wrapped, but the result (carrying the
+	// HTTP response) is still returned.
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// NOTE(review): http.Request.Cancel is deprecated in modern Go; generated
+	// code predates context-based cancellation.
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// 200 (OK), 202 (Accepted) and 204 (NoContent) are treated as success.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Generalize sets the state of the VM as Generalized.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName string) (result autorest.Response, err error) {
+	req, err := client.GeneralizePreparer(resourceGroupName, vmName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GeneralizeSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure sending request")
+	}
+
+	result, err = client.GeneralizeResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GeneralizePreparer prepares the Generalize request.
+func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, vmName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// No cancel channel: Generalize is a synchronous (non-polling) operation.
+	return preparer.Prepare(&http.Request{})
+}
+
+// GeneralizeSender sends the Generalize request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GeneralizeResponder handles the response to the Generalize request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// Only 200 (OK) is accepted as success.
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get the operation to get a virtual machine.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine. expand is the expand expression to apply on the
+// operation. Possible values include: 'instanceView'
+func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, expand InstanceViewTypes) (result VirtualMachine, err error) {
+	req, err := client.GetPreparer(resourceGroupName, vmName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, vmName string, expand InstanceViewTypes) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	// $expand is optional and only added when a non-empty value is supplied.
+	if len(string(expand)) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result VirtualMachine, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		// The JSON body is decoded into the VirtualMachine result before closing.
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List the operation to list virtual machines under a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending request")
+	}
+
+	// Returns the first page only; use ListNextResults to follow nextLink.
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) {
+	req, err := lastResults.VirtualMachineListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing next results request")
+	}
+	// A nil request (with nil error) indicates there is no next page; the
+	// zero-value result is returned.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListAll gets the list of Virtual Machines in the subscription. Use nextLink
+// property in the response to get the next page of Virtual Machines. Do this
+// till nextLink is not null to fetch all the Virtual Machines.
+func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) {
+	// Subscription-wide listing: no resourceGroupName path parameter.
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) ListAllResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListAllNextResults retrieves the next set of results, if any.
+func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) {
+	req, err := lastResults.VirtualMachineListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing next results request")
+	}
+	// A nil request (with nil error) indicates there is no next page; the
+	// zero-value result is returned.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListAvailableSizes lists all available virtual machine sizes it can be
+// resized to for a virtual machine.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, vmName string) (result VirtualMachineSizeListResult, err error) {
+	req, err := client.ListAvailableSizesPreparer(resourceGroupName, vmName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAvailableSizesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAvailableSizesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAvailableSizesPreparer prepares the ListAvailableSizes request.
+func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName string, vmName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// PowerOff the operation to power off (stop) a virtual machine. This method
+// may poll for completion. Polling can be canceled by passing the cancel
+// channel argument. The channel will be used to cancel polling and any
+// outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) PowerOff(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.PowerOffPreparer(resourceGroupName, vmName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request")
+	}
+
+	resp, err := client.PowerOffSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure sending request")
+	}
+
+	result, err = client.PowerOffResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// PowerOffPreparer prepares the PowerOff request.
+func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// NOTE(review): http.Request.Cancel is deprecated in modern Go; generated
+	// code predates context-based cancellation.
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// PowerOffSender sends the PowerOff request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// PowerOffResponder handles the response to the PowerOff request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// 200 (OK) and 202 (Accepted) are the only success statuses.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Redeploy the operation to redeploy a virtual machine. This method may poll
+// for completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) Redeploy(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.RedeployPreparer(resourceGroupName, vmName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure preparing request")
+	}
+
+	resp, err := client.RedeploySender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure sending request")
+	}
+
+	result, err = client.RedeployResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// RedeployPreparer prepares the Redeploy request.
+func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// NOTE(review): http.Request.Cancel is deprecated in modern Go; generated
+	// code predates context-based cancellation.
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// RedeploySender sends the Redeploy request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) RedeploySender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// RedeployResponder handles the response to the Redeploy request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// 200 (OK) and 202 (Accepted) are the only success statuses.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Restart the operation to restart a virtual machine. This method may poll
+// for completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) Restart(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.RestartPreparer(resourceGroupName, vmName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure preparing request")
+	}
+
+	resp, err := client.RestartSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure sending request")
+	}
+
+	result, err = client.RestartResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// RestartPreparer prepares the Restart request.
+func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// NOTE(review): http.Request.Cancel is deprecated in modern Go; generated
+	// code predates context-based cancellation.
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// RestartSender sends the Restart request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// RestartResponder handles the response to the Restart request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// 200 (OK) and 202 (Accepted) are the only success statuses.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Start the operation to start a virtual machine. This method may poll for
+// completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
+func (client VirtualMachinesClient) Start(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.StartPreparer(resourceGroupName, vmName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure preparing request")
+	}
+
+	resp, err := client.StartSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure sending request")
+	}
+
+	result, err = client.StartResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// StartPreparer prepares the Start request.
+func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmName":            autorest.Encode("path", vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// NOTE(review): http.Request.Cancel is deprecated in modern Go; generated
+	// code predates context-based cancellation.
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// StartSender sends the Start request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// StartResponder handles the response to the Start request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// 200 (OK) and 202 (Accepted) are the only success statuses.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go
    new file mode 100644
    index 0000000..f2b92a2
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go
    @@ -0,0 +1,1096 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// VirtualMachineScaleSetsClient is the Compute Management Client scoped to
+// virtual machine scale set operations. It embeds ManagementClient, which
+// supplies BaseURI, SubscriptionID, APIVersion and the shared sender used by
+// SendWithSender.
+type VirtualMachineScaleSetsClient struct {
+	ManagementClient
+}
+
+// NewVirtualMachineScaleSetsClient creates an instance of the
+// VirtualMachineScaleSetsClient client using the package-level DefaultBaseURI.
+func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient {
+	return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the
+// VirtualMachineScaleSetsClient client with a caller-supplied base URI.
+func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient {
+	return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// CreateOrUpdate allows you to create or update a virtual machine scale set
+// by providing parameters or a path to pre-configured parameter file. This
+// method may poll for completion. Polling can be canceled by passing the
+// cancel channel argument. The channel will be used to cancel polling and
+// any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. name is parameters
+// supplied to the Create Virtual Machine Scale Set operation. parameters is
+// parameters supplied to the Create Virtual Machine Scale Set operation.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (result autorest.Response, err error) {
+	// Client-side validation: when a storage profile with an OS disk is
+	// supplied, OsDisk.Name must be non-nil; ProvisioningState is read-only
+	// and must not be set by the caller.
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.Properties.VirtualMachineProfile", Name: validation.Null, Rule: false,
+					Chain: []validation.Constraint{{Target: "parameters.Properties.VirtualMachineProfile.StorageProfile", Name: validation.Null, Rule: false,
+						Chain: []validation.Constraint{{Target: "parameters.Properties.VirtualMachineProfile.StorageProfile.OsDisk", Name: validation.Null, Rule: false,
+							Chain: []validation.Constraint{{Target: "parameters.Properties.VirtualMachineProfile.StorageProfile.OsDisk.Name", Name: validation.Null, Rule: true, Chain: nil}}},
+						}},
+					}},
+					{Target: "parameters.Properties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil},
+				}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate")
+	}
+
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+// The scale set definition is serialized as the JSON body of a PUT request;
+// the cancel channel rides on http.Request.Cancel.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"name":              autorest.Encode("path", name),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{name}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	// 200 (OK) and 201 (Created) are the only status codes treated as success.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Deallocate allows you to deallocate virtual machines in a virtual machine
+// scale set. Shuts down the virtual machines and releases the compute
+// resources. You are not billed for the compute resources that this virtual
+// machine scale set uses. This method may poll for completion. Polling can
+// be canceled by passing the cancel channel argument. The channel will be
+// used to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmScaleSetName is the
+// name of the virtual machine scale set. vmInstanceIDs is the list of
+// virtual machine scale set instance IDs.
+func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeallocateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure sending request")
+	}
+
+	result, err = client.DeallocateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeallocatePreparer prepares the Deallocate request.
+func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// The JSON body listing instance IDs is attached only when vmInstanceIDs
+	// is non-nil; presumably an omitted body targets the whole scale set —
+	// confirm against the service API documentation.
+	if vmInstanceIDs != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithJSON(vmInstanceIDs))
+	}
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeallocateSender sends the Deallocate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeallocateResponder handles the response to the Deallocate request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) {
+	// 200 (OK) and 202 (Accepted) are the only status codes treated as success.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Delete allows you to delete a virtual machine scale set. This method may
+// poll for completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmScaleSetName is the
+// name of the virtual machine scale set.
+func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	// 200 (OK), 202 (Accepted) and 204 (No Content) are all treated as success.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// DeleteInstances allows you to delete virtual machines in a virtual machine
+// scale set. This method may poll for completion. Polling can be canceled by
+// passing the cancel channel argument. The channel will be used to cancel
+// polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. vmScaleSetName is the
+// name of the virtual machine scale set. vmInstanceIDs is the list of
+// virtual machine scale set instance IDs.
+func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) {
+	// Unlike Deallocate, the instance ID list is mandatory here: InstanceIds
+	// must be non-nil or the call fails client-side.
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: vmInstanceIDs,
+			Constraints: []validation.Constraint{{Target: "vmInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances")
+	}
+
+	req, err := client.DeleteInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteInstancesSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteInstancesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeleteInstancesPreparer prepares the DeleteInstances request.
+func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete", pathParameters),
+		autorest.WithJSON(vmInstanceIDs),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteInstancesSender sends the DeleteInstances request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteInstancesResponder handles the response to the DeleteInstances request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) {
+	// 200 (OK) and 202 (Accepted) are the only status codes treated as success.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get display information about a virtual machine scale set.
+// This is a synchronous GET: no polling and no cancel channel, unlike the
+// long-running operations on this client.
+//
+// resourceGroupName is the name of the resource group. vmScaleSetName is the
+// name of the virtual machine scale set.
+func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSet, err error) {
+	req, err := client.GetPreparer(resourceGroupName, vmScaleSetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) {
+	// Only 200 (OK) is success; the body is unmarshalled from JSON into result.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// GetInstanceView displays status of a virtual machine scale set instance.
+// Synchronous GET; no polling is involved.
+//
+// resourceGroupName is the name of the resource group. vmScaleSetName is the
+// name of the virtual machine scale set.
+func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) {
+	req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetInstanceViewSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure sending request")
+	}
+
+	result, err = client.GetInstanceViewResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetInstanceViewPreparer prepares the GetInstanceView request.
+func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetInstanceViewSender sends the GetInstanceView request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetInstanceView, err error) {
+	// Only 200 (OK) is success; the body is unmarshalled from JSON into result.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List lists all virtual machine scale sets under a resource group.
+// Only the first page is returned; use ListNextResults to page through the
+// remainder.
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualMachineScaleSetsClient) List(resourceGroupName string) (result VirtualMachineScaleSetListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualMachineScaleSetsClient) ListNextResults(lastResults VirtualMachineScaleSetListResult) (result VirtualMachineScaleSetListResult, err error) {
+	req, err := lastResults.VirtualMachineScaleSetListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing next results request")
+	}
+	// A nil request means lastResults carried no next-page link: return the
+	// zero-value result with a nil error to signal end of pagination.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListAll lists all Virtual Machine Scale Sets in the subscription. Use
+// nextLink property in the response to get the next page of Virtual Machine
+// Scale Sets. Do this till nextLink is not null to fetch all the Virtual
+// Machine Scale Sets.
+func (client VirtualMachineScaleSetsClient) ListAll() (result VirtualMachineScaleSetListWithLinkResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+// Subscription-wide listing: the URL has no resource-group segment.
+func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) ListAllResponder(resp *http.Response) (result VirtualMachineScaleSetListWithLinkResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client VirtualMachineScaleSetsClient) ListAllNextResults(lastResults VirtualMachineScaleSetListWithLinkResult) (result VirtualMachineScaleSetListWithLinkResult, err error) {
+	req, err := lastResults.VirtualMachineScaleSetListWithLinkResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing next results request")
+	}
+	// A nil request means lastResults carried no next-page link: return the
+	// zero-value result with a nil error to signal end of pagination.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
    +// ListSkus displays available skus for your virtual machine scale set
    +// including the minimum and maximum vm instances allowed for a particular
    +// sku.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set.
    +func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetListSkusResult, err error) {
    +	req, err := client.ListSkusPreparer(resourceGroupName, vmScaleSetName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSkusSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListSkusResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListSkusPreparer prepares the ListSkus request.
    +func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSkusSender sends the ListSkus request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListSkusResponder handles the response to the ListSkus request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetsClient) ListSkusResponder(resp *http.Response) (result VirtualMachineScaleSetListSkusResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListSkusNextResults retrieves the next set of results, if any.
    +func (client VirtualMachineScaleSetsClient) ListSkusNextResults(lastResults VirtualMachineScaleSetListSkusResult) (result VirtualMachineScaleSetListSkusResult, err error) {
    +	req, err := lastResults.VirtualMachineScaleSetListSkusResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSkusSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListSkusResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// PowerOff allows you to power off (stop) virtual machines in a virtual
    +// machine scale set. Note that resources are still attached and you are
    +// getting charged for the resources. Use deallocate to release resources.
    +// This method may poll for completion. Polling can be canceled by passing
    +// the cancel channel argument. The channel will be used to cancel polling
    +// and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. vmInstanceIDs is the list of
    +// virtual machine scale set instance IDs.
    +func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.PowerOffSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.PowerOffResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// PowerOffPreparer prepares the PowerOff request.
    +func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	if vmInstanceIDs != nil {
    +		preparer = autorest.DecoratePreparer(preparer,
    +			autorest.WithJSON(vmInstanceIDs))
    +	}
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// PowerOffSender sends the PowerOff request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// PowerOffResponder handles the response to the PowerOff request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Reimage allows you to re-image(update the version of the installed
    +// operating system) virtual machines in a virtual machine scale set. This
    +// method may poll for completion. Polling can be canceled by passing the
    +// cancel channel argument. The channel will be used to cancel polling and
    +// any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set.
    +func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ReimageSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ReimageResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ReimagePreparer prepares the Reimage request.
    +func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// ReimageSender sends the Reimage request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// ReimageResponder handles the response to the Reimage request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Restart allows you to restart virtual machines in a virtual machine scale
    +// set. This method may poll for completion. Polling can be canceled by
    +// passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. vmInstanceIDs is the list of
    +// virtual machine scale set instance IDs.
    +func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.RestartSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.RestartResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// RestartPreparer prepares the Restart request.
    +func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	if vmInstanceIDs != nil {
    +		preparer = autorest.DecoratePreparer(preparer,
    +			autorest.WithJSON(vmInstanceIDs))
    +	}
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// RestartSender sends the Restart request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// RestartResponder handles the response to the Restart request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Start allows you to start virtual machines in a virtual machine scale set.
    +// This method may poll for completion. Polling can be canceled by passing
    +// the cancel channel argument. The channel will be used to cancel polling
    +// and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. vmInstanceIDs is the list of
    +// virtual machine scale set instance IDs.
    +func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.StartSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.StartResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// StartPreparer prepares the Start request.
    +func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	if vmInstanceIDs != nil {
    +		preparer = autorest.DecoratePreparer(preparer,
    +			autorest.WithJSON(vmInstanceIDs))
    +	}
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// StartSender sends the Start request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// StartResponder handles the response to the Start request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// UpdateInstances allows you to manually upgrade virtual machines in a
    +// virtual machine scale set. This method may poll for completion. Polling
    +// can be canceled by passing the cancel channel argument. The channel will
    +// be used to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. vmInstanceIDs is the list of
    +// virtual machine scale set instance IDs.
    +func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: vmInstanceIDs,
    +			Constraints: []validation.Constraint{{Target: "vmInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances")
    +	}
    +
    +	req, err := client.UpdateInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.UpdateInstancesSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.UpdateInstancesResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// UpdateInstancesPreparer prepares the UpdateInstances request.
    +func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade", pathParameters),
    +		autorest.WithJSON(vmInstanceIDs),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// UpdateInstancesSender sends the UpdateInstances request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// UpdateInstancesResponder handles the response to the UpdateInstances request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go
    new file mode 100644
    index 0000000..37f2be3
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go
    @@ -0,0 +1,689 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
    +// VirtualMachineScaleSetVMsClient is the the Compute Management Client.
    +type VirtualMachineScaleSetVMsClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualMachineScaleSetVMsClient creates an instance of the
    +// VirtualMachineScaleSetVMsClient client.
    +func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient {
    +	return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the
    +// VirtualMachineScaleSetVMsClient client.
    +func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient {
    +	return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// Deallocate allows you to deallocate a virtual machine scale set virtual
    +// machine. Shuts down the virtual machine and releases the compute
    +// resources. You are not billed for the compute resources that this virtual
    +// machine uses. This method may poll for completion. Polling can be canceled
    +// by passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeallocateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeallocateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeallocatePreparer prepares the Deallocate request.
    +func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeallocateSender sends the Deallocate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeallocateResponder handles the response to the Deallocate request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete allows you to delete a virtual machine scale set. This method may
    +// poll for completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get displays information about a virtual machine scale set virtual machine.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, vmScaleSetName, instanceID)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// GetInstanceView displays the status of a virtual machine scale set virtual
    +// machine.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) {
    +	req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName, instanceID)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetInstanceViewSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetInstanceViewResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetInstanceViewPreparer prepares the GetInstanceView request.
    +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetInstanceViewSender sends the GetInstanceView request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetVMInstanceView, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
+// List lists all virtual machines in a VM scale set.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualMachineScaleSetName is the name of the virtual machine scale set.
+// filter is the filter to apply on the operation. selectParameter is the
+// select expression to apply on the operation. expand is the expand
+// expression to apply on the operation.
    +func (client VirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":          autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":             autorest.Encode("path", client.SubscriptionID),
    +		"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(filter) > 0 {
    +		queryParameters["$filter"] = autorest.Encode("query", filter)
    +	}
    +	if len(selectParameter) > 0 {
    +		queryParameters["$select"] = autorest.Encode("query", selectParameter)
    +	}
    +	if len(expand) > 0 {
    +		queryParameters["$expand"] = autorest.Encode("query", expand)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetVMListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client VirtualMachineScaleSetVMsClient) ListNextResults(lastResults VirtualMachineScaleSetVMListResult) (result VirtualMachineScaleSetVMListResult, err error) {
    +	req, err := lastResults.VirtualMachineScaleSetVMListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// PowerOff allows you to power off (stop) a virtual machine in a VM scale
    +// set. This method may poll for completion. Polling can be canceled by
    +// passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.PowerOffSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.PowerOffResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// PowerOffPreparer prepares the PowerOff request.
    +func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// PowerOffSender sends the PowerOff request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// PowerOffResponder handles the response to the PowerOff request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
+// Reimage allows you to re-image (update the version of the installed
    +// operating system) a virtual machine scale set instance. This method may
    +// poll for completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) Reimage(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ReimageSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ReimageResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ReimagePreparer prepares the Reimage request.
    +func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// ReimageSender sends the Reimage request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// ReimageResponder handles the response to the Reimage request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Restart allows you to restart a virtual machine in a VM scale set. This
    +// method may poll for completion. Polling can be canceled by passing the
    +// cancel channel argument. The channel will be used to cancel polling and
    +// any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.RestartSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.RestartResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// RestartPreparer prepares the Restart request.
    +func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// RestartSender sends the Restart request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// RestartResponder handles the response to the Restart request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Start allows you to start a virtual machine in a VM scale set. This method
    +// may poll for completion. Polling can be canceled by passing the cancel
    +// channel argument. The channel will be used to cancel polling and any
    +// outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. vmScaleSetName is the
    +// name of the virtual machine scale set. instanceID is the instance id of
    +// the virtual machine.
    +func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.StartSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.StartResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// StartPreparer prepares the Start request.
    +func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"instanceId":        autorest.Encode("path", instanceID),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +		"vmScaleSetName":    autorest.Encode("path", vmScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// StartSender sends the Start request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// StartResponder handles the response to the Start request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go
    new file mode 100644
    index 0000000..507e9f1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go
    @@ -0,0 +1,111 @@
    +package compute
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// VirtualMachineSizesClient is the Compute Management Client.
    +type VirtualMachineSizesClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualMachineSizesClient creates an instance of the
    +// VirtualMachineSizesClient client.
    +func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient {
    +	return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the
    +// VirtualMachineSizesClient client.
    +func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient {
    +	return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// List lists all available virtual machine sizes for a subscription in a
    +// location.
    +//
    +// location is the location upon which virtual-machine-sizes is queried.
    +func (client VirtualMachineSizesClient) List(location string) (result VirtualMachineSizeListResult, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: location,
    +			Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineSizesClient", "List")
    +	}
    +
    +	req, err := client.ListPreparer(location)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go
    new file mode 100644
    index 0000000..8b70b12
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go
    @@ -0,0 +1,635 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// ApplicationGatewaysClient is the Microsoft Azure Network management API
    +// provides a RESTful set of web services that interact with Microsoft Azure
    +// Networks service to manage your network resources. The API has entities
    +// that capture the relationship between an end user and the Microsoft Azure
    +// Networks service.
    +type ApplicationGatewaysClient struct {
    +	ManagementClient
    +}
    +
    +// NewApplicationGatewaysClient creates an instance of the
    +// ApplicationGatewaysClient client.
    +func NewApplicationGatewaysClient(subscriptionID string) ApplicationGatewaysClient {
    +	return NewApplicationGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewApplicationGatewaysClientWithBaseURI creates an instance of the
    +// ApplicationGatewaysClient client.
    +func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ApplicationGatewaysClient {
    +	return ApplicationGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// BackendHealth the BackendHealth operation gets the backend health of
+// an application gateway in the specified resource group through Network
    +// resource provider. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. applicationGatewayName
    +// is the name of the application gateway. expand is expands
    +// BackendAddressPool and BackendHttpSettings referenced in backend health.
    +func (client ApplicationGatewaysClient) BackendHealth(resourceGroupName string, applicationGatewayName string, expand string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.BackendHealthPreparer(resourceGroupName, applicationGatewayName, expand, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.BackendHealthSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.BackendHealthResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// BackendHealthPreparer prepares the BackendHealth request.
    +func (client ApplicationGatewaysClient) BackendHealthPreparer(resourceGroupName string, applicationGatewayName string, expand string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"applicationGatewayName": autorest.Encode("path", applicationGatewayName),
    +		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(expand) > 0 {
    +		queryParameters["$expand"] = autorest.Encode("query", expand)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// BackendHealthSender sends the BackendHealth request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// BackendHealthResponder handles the response to the BackendHealth request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) BackendHealthResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
+// CreateOrUpdate the Put ApplicationGateway operation creates/updates an
+// ApplicationGateway. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. applicationGatewayName
    +// is the name of the ApplicationGateway. parameters is parameters supplied
+// to the create/delete ApplicationGateway operation.
    +func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: parameters,
    +			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
    +				Chain: []validation.Constraint{{Target: "parameters.Properties.WebApplicationFirewallConfiguration", Name: validation.Null, Rule: false,
    +					Chain: []validation.Constraint{{Target: "parameters.Properties.WebApplicationFirewallConfiguration.Enabled", Name: validation.Null, Rule: true, Chain: nil}}},
    +					{Target: "parameters.Properties.OperationalState", Name: validation.ReadOnly, Rule: true, Chain: nil},
    +				}}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate")
    +	}
    +
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"applicationGatewayName": autorest.Encode("path", applicationGatewayName),
    +		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete ApplicationGateway operation deletes the specified
    +// application gateway. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. applicationGatewayName
    +// is the name of the application gateway.
    +func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"applicationGatewayName": autorest.Encode("path", applicationGatewayName),
    +		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the Get ApplicationGateway operation retrieves information about the
    +// specified application gateway.
    +//
    +// resourceGroupName is the name of the resource group. applicationGatewayName
    +// is the name of the application gateway.
    +func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, applicationGatewayName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"applicationGatewayName": autorest.Encode("path", applicationGatewayName),
    +		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (result ApplicationGateway, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List ApplicationGateway operation retrieves all the application
    +// gateways in a resource group.
    +//
    +// resourceGroupName is the name of the resource group.
    +func (client ApplicationGatewaysClient) List(resourceGroupName string) (result ApplicationGatewayListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) ListResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) {
    +	req, err := lastResults.ApplicationGatewayListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// ListAll the List ApplicationGateway operation retrieves all the application
    +// gateways in a subscription.
    +func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, err error) {
    +	req, err := client.ListAllPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListAllSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListAllResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListAllPreparer prepares the ListAll request.
    +func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListAllSender sends the ListAll request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListAllResponder handles the response to the ListAll request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) ListAllResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListAllNextResults retrieves the next set of results, if any.
    +func (client ApplicationGatewaysClient) ListAllNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) {
    +	req, err := lastResults.ApplicationGatewayListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListAllSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListAllResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
+// Start the Start ApplicationGateway operation starts the application gateway in
    +// the specified resource group through Network resource provider. This
    +// method may poll for completion. Polling can be canceled by passing the
    +// cancel channel argument. The channel will be used to cancel polling and
    +// any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. applicationGatewayName
    +// is the name of the application gateway.
    +func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.StartPreparer(resourceGroupName, applicationGatewayName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.StartSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.StartResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// StartPreparer prepares the Start request.
    +func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"applicationGatewayName": autorest.Encode("path", applicationGatewayName),
    +		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// StartSender sends the Start request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) StartSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// StartResponder handles the response to the Start request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
+// Stop the Stop ApplicationGateway operation stops the application gateway in the
    +// specified resource group through Network resource provider. This method
    +// may poll for completion. Polling can be canceled by passing the cancel
    +// channel argument. The channel will be used to cancel polling and any
    +// outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. applicationGatewayName
    +// is the name of the application gateway.
    +func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.StopPreparer(resourceGroupName, applicationGatewayName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.StopSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.StopResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// StopPreparer prepares the Stop request.
    +func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"applicationGatewayName": autorest.Encode("path", applicationGatewayName),
    +		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// StopSender sends the Stop request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ApplicationGatewaysClient) StopSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// StopResponder handles the response to the Stop request. The method always
    +// closes the http.Response Body.
    +func (client ApplicationGatewaysClient) StopResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go
    new file mode 100644
    index 0000000..b2ab581
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go
    @@ -0,0 +1,130 @@
    +// Package network implements the Azure ARM Network service API version
    +// 2016-09-01.
    +//
    +// The Microsoft Azure Network management API provides a RESTful set of web
    +// services that interact with Microsoft Azure Networks service to manage
    +// your network resources. The API has entities that capture the relationship
    +// between an end user and the Microsoft Azure Networks service.
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
    +const (
+	// APIVersion is the version of the Network API.
    +	APIVersion = "2016-09-01"
    +
    +	// DefaultBaseURI is the default URI used for the service Network
    +	DefaultBaseURI = "https://management.azure.com"
    +)
    +
    +// ManagementClient is the base client for Network.
    +type ManagementClient struct {
    +	autorest.Client
    +	BaseURI        string
    +	APIVersion     string
    +	SubscriptionID string
    +}
    +
    +// New creates an instance of the ManagementClient client.
    +func New(subscriptionID string) ManagementClient {
    +	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewWithBaseURI creates an instance of the ManagementClient client.
    +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
    +	return ManagementClient{
    +		Client:         autorest.NewClientWithUserAgent(UserAgent()),
    +		BaseURI:        baseURI,
    +		APIVersion:     APIVersion,
    +		SubscriptionID: subscriptionID,
    +	}
    +}
    +
    +// CheckDNSNameAvailability checks whether a domain name in the cloudapp.net
    +// zone is available for use.
    +//
    +// location is the location of the domain name. domainNameLabel is the domain
    +// name to be verified. It must conform to the following regular expression:
    +// ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
    +func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, err error) {
    +	req, err := client.CheckDNSNameAvailabilityPreparer(location, domainNameLabel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CheckDNSNameAvailabilitySender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CheckDNSNameAvailabilityResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CheckDNSNameAvailabilityPreparer prepares the CheckDNSNameAvailability request.
    +func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string, domainNameLabel string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"location":       autorest.Encode("path", location),
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(domainNameLabel) > 0 {
    +		queryParameters["domainNameLabel"] = autorest.Encode("query", domainNameLabel)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ManagementClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. The method always
    +// closes the http.Response Body.
    +func (client ManagementClient) CheckDNSNameAvailabilityResponder(resp *http.Response) (result DNSNameAvailabilityResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go
    new file mode 100644
    index 0000000..86b8faa
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go
    @@ -0,0 +1,343 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
    +// ExpressRouteCircuitAuthorizationsClient is the client for the Microsoft
    +// Azure Network management API, which provides a RESTful set of web
    +// services that interact with Microsoft Azure Networks service to manage
    +// your network resources. The API has entities that capture the
    +// relationship between an end user and the Microsoft Azure Networks service.
    +type ExpressRouteCircuitAuthorizationsClient struct {
    +	ManagementClient
    +}
    +
    +// NewExpressRouteCircuitAuthorizationsClient creates an instance of the
    +// ExpressRouteCircuitAuthorizationsClient client.
    +func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string) ExpressRouteCircuitAuthorizationsClient {
    +	return NewExpressRouteCircuitAuthorizationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance
    +// of the ExpressRouteCircuitAuthorizationsClient client.
    +func NewExpressRouteCircuitAuthorizationsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitAuthorizationsClient {
    +	return ExpressRouteCircuitAuthorizationsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put Authorization operation creates/updates an
    +// authorization in the specified ExpressRouteCircuits. This method may poll
    +// for completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the express route circuit. authorizationName is the name of the
    +// authorization. authorizationParameters is parameters supplied to the
    +// create/update ExpressRouteCircuitAuthorization operation
    +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, authorizationName, authorizationParameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"authorizationName": autorest.Encode("path", authorizationName),
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters),
    +		autorest.WithJSON(authorizationParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete authorization operation deletes the specified
    +// authorization from the specified ExpressRouteCircuit. This method may poll
    +// for completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the express route circuit. authorizationName is the name of the
    +// authorization.
    +func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, circuitName, authorizationName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"authorizationName": autorest.Encode("path", authorizationName),
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitAuthorizationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the GET authorization operation retrieves the specified authorization
    +// from the specified ExpressRouteCircuit.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the express route circuit. authorizationName is the name of the
    +// authorization.
    +func (client ExpressRouteCircuitAuthorizationsClient) Get(resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, circuitName, authorizationName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"authorizationName": autorest.Encode("path", authorizationName),
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitAuthorizationsClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitAuthorizationsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List authorization operation retrieves all the authorizations in
    +// an ExpressRouteCircuit.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the circuit.
    +func (client ExpressRouteCircuitAuthorizationsClient) List(resourceGroupName string, circuitName string) (result AuthorizationListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName, circuitName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitAuthorizationsClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitAuthorizationsClient) ListResponder(resp *http.Response) (result AuthorizationListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client ExpressRouteCircuitAuthorizationsClient) ListNextResults(lastResults AuthorizationListResult) (result AuthorizationListResult, err error) {
    +	req, err := lastResults.AuthorizationListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go
    new file mode 100644
    index 0000000..ddbff0a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go
    @@ -0,0 +1,339 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
    +// ExpressRouteCircuitPeeringsClient is the client for the Microsoft Azure
    +// Network management API, which provides a RESTful set of web services
    +// that interact with Microsoft Azure Networks service to manage your
    +// network resources. The API has entities that capture the relationship
    +// between an end user and the Microsoft Azure Networks service.
    +type ExpressRouteCircuitPeeringsClient struct {
    +	ManagementClient
    +}
    +
    +// NewExpressRouteCircuitPeeringsClient creates an instance of the
    +// ExpressRouteCircuitPeeringsClient client.
    +func NewExpressRouteCircuitPeeringsClient(subscriptionID string) ExpressRouteCircuitPeeringsClient {
    +	return NewExpressRouteCircuitPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewExpressRouteCircuitPeeringsClientWithBaseURI creates an instance of the
    +// ExpressRouteCircuitPeeringsClient client.
    +func NewExpressRouteCircuitPeeringsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitPeeringsClient {
    +	return ExpressRouteCircuitPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put Peering operation creates/updates a peering in the
    +// specified ExpressRouteCircuits. This method may poll for completion.
    +// Polling can be canceled by passing the cancel channel argument. The
    +// channel will be used to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the express route circuit. peeringName is the name of the peering.
    +// peeringParameters is parameters supplied to the create/update
    +// ExpressRouteCircuit Peering operation
    +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, peeringName, peeringParameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"peeringName":       autorest.Encode("path", peeringName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters),
    +		autorest.WithJSON(peeringParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete peering operation deletes the specified peering from the
    +// ExpressRouteCircuit. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the express route circuit. peeringName is the name of the peering.
    +func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, circuitName, peeringName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"peeringName":       autorest.Encode("path", peeringName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitPeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the GET peering operation retrieves the specified peering from
    +// the ExpressRouteCircuit.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the express route circuit. peeringName is the name of the peering.
    +func (client ExpressRouteCircuitPeeringsClient) Get(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, circuitName, peeringName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"peeringName":       autorest.Encode("path", peeringName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitPeeringsClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitPeeringsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List peering operation retrieves all the peerings in an
    +// ExpressRouteCircuit.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the circuit.
    +func (client ExpressRouteCircuitPeeringsClient) List(resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName, circuitName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitPeeringsClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitPeeringsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitPeeringListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client ExpressRouteCircuitPeeringsClient) ListNextResults(lastResults ExpressRouteCircuitPeeringListResult) (result ExpressRouteCircuitPeeringListResult, err error) {
    +	req, err := lastResults.ExpressRouteCircuitPeeringListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go
    new file mode 100644
    index 0000000..9e46f8f
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go
    @@ -0,0 +1,761 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
    +// ExpressRouteCircuitsClient is the the Microsoft Azure Network management
    +// API provides a RESTful set of web services that interact with Microsoft
    +// Azure Networks service to manage your network resources. The API has
    +// entities that capture the relationship between an end user and the
    +// Microsoft Azure Networks service.
    +type ExpressRouteCircuitsClient struct {
    +	ManagementClient
    +}
    +
    +// NewExpressRouteCircuitsClient creates an instance of the
    +// ExpressRouteCircuitsClient client.
    +func NewExpressRouteCircuitsClient(subscriptionID string) ExpressRouteCircuitsClient {
    +	return NewExpressRouteCircuitsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the
    +// ExpressRouteCircuitsClient client.
    +func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitsClient {
    +	return ExpressRouteCircuitsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put ExpressRouteCircuit operation creates/updates a
    +// ExpressRouteCircuit This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the circuit. parameters is parameters supplied to the
    +// create/delete ExpressRouteCircuit operation
    +func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete ExpressRouteCircuit operation deletes the specified
    +// ExpressRouteCircuit. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the express route Circuit.
    +func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, circuitName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string, circuitName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the Get ExpressRouteCircuit operation retrieves information about the
    +// specified ExpressRouteCircuit.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the circuit.
    +func (client ExpressRouteCircuitsClient) Get(resourceGroupName string, circuitName string) (result ExpressRouteCircuit, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, circuitName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, circuitName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuit, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// GetPeeringStats the List stats ExpressRouteCircuit operation retrieves all
    +// the stats from a ExpressRouteCircuits in a resource group.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the circuit. peeringName is the name of the peering.
    +func (client ExpressRouteCircuitsClient) GetPeeringStats(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitStats, err error) {
    +	req, err := client.GetPeeringStatsPreparer(resourceGroupName, circuitName, peeringName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetPeeringStatsSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetPeeringStatsResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPeeringStatsPreparer prepares the GetPeeringStats request.
    +func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"peeringName":       autorest.Encode("path", peeringName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetPeeringStatsSender sends the GetPeeringStats request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitsClient) GetPeeringStatsSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetPeeringStatsResponder handles the response to the GetPeeringStats request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitsClient) GetPeeringStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// GetStats the List stats ExpressRouteCircuit operation retrieves all the
    +// stats from a ExpressRouteCircuits in a resource group.
    +//
    +// resourceGroupName is the name of the resource group. circuitName is the
    +// name of the circuit.
    +func (client ExpressRouteCircuitsClient) GetStats(resourceGroupName string, circuitName string) (result ExpressRouteCircuitStats, err error) {
    +	req, err := client.GetStatsPreparer(resourceGroupName, circuitName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetStatsSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetStatsResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetStatsPreparer prepares the GetStats request.
    +func (client ExpressRouteCircuitsClient) GetStatsPreparer(resourceGroupName string, circuitName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"circuitName":       autorest.Encode("path", circuitName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetStatsSender sends the GetStats request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitsClient) GetStatsSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetStatsResponder handles the response to the GetStats request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitsClient) GetStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List ExpressRouteCircuit operation retrieves all the
    +// ExpressRouteCircuits in a resource group.
    +//
    +// resourceGroupName is the name of the resource group.
    +func (client ExpressRouteCircuitsClient) List(resourceGroupName string) (result ExpressRouteCircuitListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) {
    +	req, err := lastResults.ExpressRouteCircuitListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// ListAll the List ExpressRouteCircuit operation retrieves all the
    +// ExpressRouteCircuits in a subscription.
    +func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, err error) {
    +	req, err := client.ListAllPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListAllSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListAllResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListAllPreparer prepares the ListAll request.
    +func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListAllSender sends the ListAll request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListAllResponder handles the response to the ListAll request. The method always
    +// closes the http.Response Body.
    +func (client ExpressRouteCircuitsClient) ListAllResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
+// ListAllNextResults retrieves the next page of ListAll results, if any.
+// When lastResults carries no next-page link, a zero-value result and nil
+// error are returned (pagination is complete).
+func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) {
+	req, err := lastResults.ExpressRouteCircuitListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		// No next link in lastResults: there are no further pages.
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListArpTable retrieves the currently advertised ARP table associated with
+// an ExpressRouteCircuit peering in a resource group. This is a
+// long-running operation: the method may poll for completion, and polling
+// plus any outstanding HTTP requests can be canceled via the cancel channel
+// argument. Only the raw autorest.Response is returned; the response body
+// is not unmarshaled.
+//
+// resourceGroupName is the name of the resource group. circuitName is the
+// name of the circuit. peeringName is the name of the peering. devicePath is
+// the path of the device.
+func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.ListArpTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListArpTableSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure sending request")
+	}
+
+	result, err = client.ListArpTableResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// ListArpTablePreparer builds the POST request for ListArpTable, expanding
+// the subscription/resource-group/circuit/peering/device path parameters and
+// adding the api-version query parameter. The cancel channel is wired into
+// the request via the legacy http.Request.Cancel field so in-flight calls
+// can be aborted.
+func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"circuitName":       autorest.Encode("path", circuitName),
+		"devicePath":        autorest.Encode("path", devicePath),
+		"peeringName":       autorest.Encode("path", peeringName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
    +
+// ListArpTableSender sends the ListArpTable request and polls the
+// asynchronous operation to completion using the client's PollingDelay.
+// The http.Response Body is closed if sending returns an error.
+func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
    +
+// ListArpTableResponder validates the ListArpTable response (expects 200 OK
+// or 202 Accepted) and always closes the http.Response Body. The body is
+// not decoded; only the raw response is returned.
+func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// ListRoutesTable retrieves the currently advertised routes table
+// associated with an ExpressRouteCircuit peering in a resource group. This
+// is a long-running operation: the method may poll for completion, and
+// polling plus any outstanding HTTP requests can be canceled via the cancel
+// channel argument. Only the raw autorest.Response is returned; the
+// response body is not unmarshaled.
+//
+// resourceGroupName is the name of the resource group. circuitName is the
+// name of the circuit. peeringName is the name of the peering. devicePath is
+// the path of the device.
+func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.ListRoutesTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListRoutesTableSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure sending request")
+	}
+
+	result, err = client.ListRoutesTableResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// ListRoutesTablePreparer builds the POST request for ListRoutesTable,
+// expanding the circuit/peering/device path parameters and adding the
+// api-version query parameter. The cancel channel is wired into the request
+// via the legacy http.Request.Cancel field.
+func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"circuitName":       autorest.Encode("path", circuitName),
+		"devicePath":        autorest.Encode("path", devicePath),
+		"peeringName":       autorest.Encode("path", peeringName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
    +
+// ListRoutesTableSender sends the ListRoutesTable request and polls the
+// asynchronous operation to completion using the client's PollingDelay.
+// The http.Response Body is closed if sending returns an error.
+func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
    +
+// ListRoutesTableResponder validates the ListRoutesTable response (expects
+// 200 OK or 202 Accepted) and always closes the http.Response Body. The
+// body is not decoded; only the raw response is returned.
+func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// ListRoutesTableSummary retrieves a summary of the currently advertised
+// routes table associated with an ExpressRouteCircuit peering in a resource
+// group (the generated doc previously said "routes table"; this operation
+// hits the routeTablesSummary endpoint). This is a long-running operation:
+// the method may poll for completion, and polling plus any outstanding HTTP
+// requests can be canceled via the cancel channel argument.
+//
+// resourceGroupName is the name of the resource group. circuitName is the
+// name of the circuit. peeringName is the name of the peering. devicePath is
+// the path of the device.
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.ListRoutesTableSummaryPreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListRoutesTableSummarySender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure sending request")
+	}
+
+	result, err = client.ListRoutesTableSummaryResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// ListRoutesTableSummaryPreparer builds the POST request for
+// ListRoutesTableSummary (routeTablesSummary endpoint), expanding the
+// circuit/peering/device path parameters and adding the api-version query
+// parameter. The cancel channel is wired into the request via the legacy
+// http.Request.Cancel field.
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"circuitName":       autorest.Encode("path", circuitName),
+		"devicePath":        autorest.Encode("path", devicePath),
+		"peeringName":       autorest.Encode("path", peeringName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
    +
+// ListRoutesTableSummarySender sends the ListRoutesTableSummary request and
+// polls the asynchronous operation to completion using the client's
+// PollingDelay. The http.Response Body is closed if sending returns an
+// error.
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
    +
+// ListRoutesTableSummaryResponder validates the ListRoutesTableSummary
+// response (expects 200 OK or 202 Accepted) and always closes the
+// http.Response Body. The body is not decoded; only the raw response is
+// returned.
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go
    new file mode 100644
    index 0000000..b65d60c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go
    @@ -0,0 +1,129 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// ExpressRouteServiceProvidersClient is the Microsoft Azure Network
+// management API client for ExpressRoute service provider operations. The
+// API is a RESTful set of web services that interact with the Microsoft
+// Azure Networks service to manage network resources; it has entities that
+// capture the relationship between an end user and the service.
+type ExpressRouteServiceProvidersClient struct {
+	ManagementClient
+}
    +
+// NewExpressRouteServiceProvidersClient creates an
+// ExpressRouteServiceProvidersClient targeting the default Azure management
+// endpoint (DefaultBaseURI).
+func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteServiceProvidersClient {
+	return NewExpressRouteServiceProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
    +
+// NewExpressRouteServiceProvidersClientWithBaseURI creates an
+// ExpressRouteServiceProvidersClient targeting the given base URI (e.g. a
+// non-default cloud endpoint).
+func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient {
+	return ExpressRouteServiceProvidersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// List retrieves all available ExpressRouteServiceProviders for the
+// subscription. Use ListNextResults to fetch subsequent pages.
+func (client ExpressRouteServiceProvidersClient) List() (result ExpressRouteServiceProviderListResult, err error) {
+	req, err := client.ListPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// ListPreparer builds the GET request for List, expanding the subscription
+// path parameter and adding the api-version query parameter.
+func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
    +
+// ListSender sends the List request through the client's configured
+// autorest pipeline. The http.Response Body is closed if sending fails.
+func (client ExpressRouteServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
    +
+// ListResponder validates the List response (expects 200 OK), unmarshals
+// the JSON body into an ExpressRouteServiceProviderListResult, and always
+// closes the http.Response Body.
+func (client ExpressRouteServiceProvidersClient) ListResponder(resp *http.Response) (result ExpressRouteServiceProviderListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	// Attach the raw response even when err is non-nil so callers can
+	// inspect the status code.
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListNextResults retrieves the next page of List results, if any. When
+// lastResults carries no next-page link, a zero-value result and nil error
+// are returned (pagination is complete).
+func (client ExpressRouteServiceProvidersClient) ListNextResults(lastResults ExpressRouteServiceProviderListResult) (result ExpressRouteServiceProviderListResult, err error) {
+	req, err := lastResults.ExpressRouteServiceProviderListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		// No next link in lastResults: there are no further pages.
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go
    new file mode 100644
    index 0000000..4a5309a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go
    @@ -0,0 +1,821 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// InterfacesClient is the Microsoft Azure Network management API client for
+// network interface (NIC) operations. The API is a RESTful set of web
+// services that interact with the Microsoft Azure Networks service to
+// manage network resources; it has entities that capture the relationship
+// between an end user and the service.
+type InterfacesClient struct {
+	ManagementClient
+}
    +
+// NewInterfacesClient creates an InterfacesClient targeting the default
+// Azure management endpoint (DefaultBaseURI).
+func NewInterfacesClient(subscriptionID string) InterfacesClient {
+	return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
    +
+// NewInterfacesClientWithBaseURI creates an InterfacesClient targeting the
+// given base URI (e.g. a non-default cloud endpoint).
+func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient {
+	return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// CreateOrUpdate creates or updates a network interface. parameters is
+// validated client-side before any request is sent (read-only
+// NetworkSecurityGroup sub-fields NetworkInterfaces and Subnets are checked
+// via validation.ReadOnly constraints). This is a long-running operation:
+// the method may poll for completion, and polling plus any outstanding HTTP
+// requests can be canceled via the cancel channel argument.
+//
+// resourceGroupName is the name of the resource group. networkInterfaceName
+// is the name of the network interface. parameters is parameters supplied to
+// the create/update NetworkInterface operation.
+func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (result autorest.Response, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.Properties.NetworkSecurityGroup", Name: validation.Null, Rule: false,
+					Chain: []validation.Constraint{{Target: "parameters.Properties.NetworkSecurityGroup.Properties", Name: validation.Null, Rule: false,
+						Chain: []validation.Constraint{{Target: "parameters.Properties.NetworkSecurityGroup.Properties.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil},
+							{Target: "parameters.Properties.NetworkSecurityGroup.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil},
+						}},
+					}},
+				}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "network.InterfacesClient", "CreateOrUpdate")
+	}
+
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// CreateOrUpdatePreparer builds the PUT request for CreateOrUpdate with a
+// JSON body of parameters, expanding the subscription/resource-group/NIC
+// path parameters and adding the api-version query parameter. The cancel
+// channel is wired into the request via the legacy http.Request.Cancel
+// field.
+func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
+		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
+		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
    +
+// CreateOrUpdateSender sends the CreateOrUpdate request and polls the
+// asynchronous operation to completion using the client's PollingDelay.
+// The http.Response Body is closed if sending returns an error.
+func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
    +
+// CreateOrUpdateResponder validates the CreateOrUpdate response (expects
+// 201 Created or 200 OK) and always closes the http.Response Body. The body
+// is not decoded; only the raw response is returned.
+func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Delete deletes the specified network interface. This is a long-running
+// operation: the method may poll for completion, and polling plus any
+// outstanding HTTP requests can be canceled via the cancel channel
+// argument. (Typo "netwokInterface" in the generated doc corrected.)
+//
+// resourceGroupName is the name of the resource group. networkInterfaceName
+// is the name of the network interface.
+func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// DeletePreparer builds the DELETE request for Delete, expanding the
+// subscription/resource-group/NIC path parameters and adding the
+// api-version query parameter. The cancel channel is wired into the request
+// via the legacy http.Request.Cancel field.
+func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
+		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
+		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
    +
+// DeleteSender sends the Delete request and polls the asynchronous
+// operation to completion using the client's PollingDelay. The
+// http.Response Body is closed if sending returns an error.
+func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
    +
+// DeleteResponder validates the Delete response (expects 204 No Content,
+// 202 Accepted, or 200 OK) and always closes the http.Response Body. The
+// body is not decoded; only the raw response is returned.
+func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get retrieves information about the specified network interface.
+//
+// resourceGroupName is the name of the resource group. networkInterfaceName
+// is the name of the network interface. expand optionally expands
+// referenced resources (passed as the $expand query parameter; may be
+// empty).
+func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) {
+	req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// GetPreparer builds the GET request for Get, expanding the
+// subscription/resource-group/NIC path parameters and adding the
+// api-version query parameter.
+func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
+		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
+		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	// $expand is only sent when the caller supplied a non-empty expand.
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
    +
+// GetSender sends the Get request through the client's configured autorest
+// pipeline. The http.Response Body is closed if sending fails.
+func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
    +
+// GetResponder validates the Get response (expects 200 OK), unmarshals the
+// JSON body into an Interface, and always closes the http.Response Body.
+func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	// Attach the raw response even when err is non-nil so callers can
+	// inspect the status code.
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// GetEffectiveRouteTable retrieves all route tables applied on a network
+// interface. This is a long-running operation: the method may poll for
+// completion, and polling plus any outstanding HTTP requests can be
+// canceled via the cancel channel argument. Only the raw autorest.Response
+// is returned; the response body is not unmarshaled.
+//
+// resourceGroupName is the name of the resource group. networkInterfaceName
+// is the name of the network interface.
+func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.GetEffectiveRouteTablePreparer(resourceGroupName, networkInterfaceName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetEffectiveRouteTableSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure sending request")
+	}
+
+	result, err = client.GetEffectiveRouteTableResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
    +// GetEffectiveRouteTablePreparer prepares the GetEffectiveRouteTable request.
    +func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
    +		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// GetEffectiveRouteTableSender sends the GetEffectiveRouteTable request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// GetEffectiveRouteTableResponder handles the response to the GetEffectiveRouteTable request. The method always
    +// closes the http.Response Body.
    +func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// GetVirtualMachineScaleSetNetworkInterface the Get network interface
    +// operation retrieves information about the specified network interface in a
    +// virtual machine scale set.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualMachineScaleSetName is the name of the virtual machine scale set.
    +// virtualmachineIndex is the virtual machine index. networkInterfaceName is
    +// the name of the network interface. expand is expand references resources.
    +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) {
    +	req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetVirtualMachineScaleSetNetworkInterfacePreparer prepares the GetVirtualMachineScaleSetNetworkInterface request.
    +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"networkInterfaceName":       autorest.Encode("path", networkInterfaceName),
    +		"resourceGroupName":          autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":             autorest.Encode("path", client.SubscriptionID),
    +		"virtualmachineIndex":        autorest.Encode("path", virtualmachineIndex),
    +		"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(expand) > 0 {
    +		queryParameters["$expand"] = autorest.Encode("query", expand)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always
    +// closes the http.Response Body.
    +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponder(resp *http.Response) (result Interface, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List networkInterfaces operation retrieves all the
    +// networkInterfaces in a resource group.
    +//
    +// resourceGroupName is the name of the resource group.
    +func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
    +	req, err := lastResults.InterfaceListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// ListAll the List networkInterfaces operation retrieves all the
    +// networkInterfaces in a subscription.
    +func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) {
    +	req, err := client.ListAllPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListAllSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListAllResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListAllPreparer prepares the ListAll request.
    +func (client InterfacesClient) ListAllPreparer() (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListAllSender sends the ListAll request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListAllResponder handles the response to the ListAll request. The method always
    +// closes the http.Response Body.
    +func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListAllNextResults retrieves the next set of results, if any.
    +func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
    +	req, err := lastResults.InterfaceListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListAllSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListAllResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// ListEffectiveNetworkSecurityGroups the list effective network security
    +// group operation retrieves all the network security groups applied on a
    +// networkInterface. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. networkInterfaceName
    +// is the name of the network interface.
    +func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName, networkInterfaceName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListEffectiveNetworkSecurityGroupsSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListEffectiveNetworkSecurityGroupsResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListEffectiveNetworkSecurityGroupsPreparer prepares the ListEffectiveNetworkSecurityGroups request.
    +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
    +		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// ListEffectiveNetworkSecurityGroupsSender sends the ListEffectiveNetworkSecurityGroups request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// ListEffectiveNetworkSecurityGroupsResponder handles the response to the ListEffectiveNetworkSecurityGroups request. The method always
    +// closes the http.Response Body.
    +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// ListVirtualMachineScaleSetNetworkInterfaces the list network interface
    +// operation retrieves information about all network interfaces in a virtual
    +// machine scale set.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualMachineScaleSetName is the name of the virtual machine scale set.
    +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, err error) {
    +	req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request.
    +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":          autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":             autorest.Encode("path", client.SubscriptionID),
    +		"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always
    +// closes the http.Response Body.
    +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any.
    +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
    +	req, err := lastResults.InterfaceListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// ListVirtualMachineScaleSetVMNetworkInterfaces the list network interface
    +// operation retrieves information about all network interfaces in a virtual
    +// machine from a virtual machine scale set.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualMachineScaleSetName is the name of the virtual machine scale set.
    +// virtualmachineIndex is the virtual machine index.
    +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, err error) {
    +	req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request.
    +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":          autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":             autorest.Encode("path", client.SubscriptionID),
    +		"virtualmachineIndex":        autorest.Encode("path", virtualmachineIndex),
    +		"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always
    +// closes the http.Response Body.
    +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any.
    +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
    +	req, err := lastResults.InterfaceListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go
    new file mode 100644
    index 0000000..3426ea0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go
    @@ -0,0 +1,419 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// LoadBalancersClient is the Microsoft Azure Network management API that
    +// provides a RESTful set of web services that interact with Microsoft Azure
    +// Networks service to manage your network resources. The API has entities
    +// that capture the relationship between an end user and the Microsoft Azure
    +// Networks service.
    +type LoadBalancersClient struct {
    +	ManagementClient
    +}
    +
    +// NewLoadBalancersClient creates an instance of the LoadBalancersClient
    +// client.
    +func NewLoadBalancersClient(subscriptionID string) LoadBalancersClient {
    +	return NewLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewLoadBalancersClientWithBaseURI creates an instance of the
    +// LoadBalancersClient client.
    +func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancersClient {
    +	return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put LoadBalancer operation creates/updates a
+// LoadBalancer. This method may poll for completion. Polling can be canceled
    +// by passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. loadBalancerName is
    +// the name of the loadBalancer. parameters is parameters supplied to the
    +// create/delete LoadBalancer operation
    +func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"loadBalancerName":  autorest.Encode("path", loadBalancerName),
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
+// Delete the Delete LoadBalancer operation deletes the specified load
+// balancer. This method may poll for completion. Polling can be canceled by
+// passing the cancel channel argument. The channel will be used to cancel
+// polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. loadBalancerName is
+// the name of the loadBalancer.
+func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, loadBalancerName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"loadBalancerName":  autorest.Encode("path", loadBalancerName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get the Get LoadBalancer operation retrieves information about the
+// specified LoadBalancer.
+//
+// resourceGroupName is the name of the resource group. loadBalancerName is
+// the name of the loadBalancer. expand is the optional expand expression applied to referenced resources.
+func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result LoadBalancer, err error) {
+	req, err := client.GetPreparer(resourceGroupName, loadBalancerName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBalancerName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"loadBalancerName":  autorest.Encode("path", loadBalancerName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) GetResponder(resp *http.Response) (result LoadBalancer, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List the List loadBalancer operation retrieves all the load balancers in a
+// resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client LoadBalancersClient) List(resourceGroupName string) (result LoadBalancerListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) ListResponder(resp *http.Response) (result LoadBalancerListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next page of List results, if any remain.
+func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) {
+	req, err := lastResults.LoadBalancerListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListAll the List loadBalancer operation retrieves all the load balancers in
+// a subscription.
+func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) ListAllResponder(resp *http.Response) (result LoadBalancerListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next page of ListAll results, if any remain.
+func (client LoadBalancersClient) ListAllNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) {
+	req, err := lastResults.LoadBalancerListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go
    new file mode 100644
    index 0000000..847054d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go
    @@ -0,0 +1,336 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// LocalNetworkGatewaysClient is the client for the Microsoft Azure Network
+// management API, which provides a RESTful set of web services that interact
+// with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
+type LocalNetworkGatewaysClient struct {
+	ManagementClient
+}
    +
+// NewLocalNetworkGatewaysClient creates an instance of the
+// LocalNetworkGatewaysClient client.
+func NewLocalNetworkGatewaysClient(subscriptionID string) LocalNetworkGatewaysClient {
+	return NewLocalNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the
+// LocalNetworkGatewaysClient client against a non-default base URI.
+func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) LocalNetworkGatewaysClient {
+	return LocalNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// CreateOrUpdate the Put LocalNetworkGateway operation creates/updates a
+// local network gateway in the specified resource group through the Network
+// resource provider. This method may poll for completion. Polling can be
+// canceled by passing the cancel channel argument. The channel will be used
+// to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// localNetworkGatewayName is the name of the local network gateway.
+// parameters is parameters supplied to the Begin Create or update Local
+// Network Gateway operation through the Network resource provider.
+func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName),
+		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
+		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Delete the Delete LocalNetworkGateway operation deletes the specified local
+// network gateway through the Network resource provider. This method may poll
+// for completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// localNetworkGatewayName is the name of the local network gateway.
+func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName),
+		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
+		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get the Get LocalNetworkGateway operation retrieves information about the
+// specified local network gateway through the Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// localNetworkGatewayName is the name of the local network gateway.
+func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, err error) {
+	req, err := client.GetPreparer(resourceGroupName, localNetworkGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName),
+		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
+		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) GetResponder(resp *http.Response) (result LocalNetworkGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List the List LocalNetworkGateways operation retrieves all the local
+// network gateways stored in the specified resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) ListResponder(resp *http.Response) (result LocalNetworkGatewayListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListNextResults retrieves the next page of List results, if any remain.
+func (client LocalNetworkGatewaysClient) ListNextResults(lastResults LocalNetworkGatewayListResult) (result LocalNetworkGatewayListResult, err error) {
+	req, err := lastResults.LocalNetworkGatewayListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go
    new file mode 100644
    index 0000000..810fe45
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go
    @@ -0,0 +1,2148 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/to"
    +	"net/http"
    +)
    +
    +// ApplicationGatewayBackendHealthServerHealth enumerates the values for
    +// application gateway backend health server health.
    +type ApplicationGatewayBackendHealthServerHealth string
    +
    +const (
    +	// Down specifies the down state for application gateway backend health
    +	// server health.
    +	Down ApplicationGatewayBackendHealthServerHealth = "Down"
    +	// Partial specifies the partial state for application gateway backend
    +	// health server health.
    +	Partial ApplicationGatewayBackendHealthServerHealth = "Partial"
    +	// Unknown specifies the unknown state for application gateway backend
    +	// health server health.
    +	Unknown ApplicationGatewayBackendHealthServerHealth = "Unknown"
    +	// Up specifies the up state for application gateway backend health server
    +	// health.
    +	Up ApplicationGatewayBackendHealthServerHealth = "Up"
    +)
    +
    +// ApplicationGatewayCookieBasedAffinity enumerates the values for application
    +// gateway cookie based affinity.
    +type ApplicationGatewayCookieBasedAffinity string
    +
    +const (
    +	// Disabled specifies the disabled state for application gateway cookie
    +	// based affinity.
    +	Disabled ApplicationGatewayCookieBasedAffinity = "Disabled"
    +	// Enabled specifies the enabled state for application gateway cookie
    +	// based affinity.
    +	Enabled ApplicationGatewayCookieBasedAffinity = "Enabled"
    +)
    +
    +// ApplicationGatewayFirewallMode enumerates the values for application
    +// gateway firewall mode.
    +type ApplicationGatewayFirewallMode string
    +
    +const (
    +	// Detection specifies the detection state for application gateway
    +	// firewall mode.
    +	Detection ApplicationGatewayFirewallMode = "Detection"
    +	// Prevention specifies the prevention state for application gateway
    +	// firewall mode.
    +	Prevention ApplicationGatewayFirewallMode = "Prevention"
    +)
    +
    +// ApplicationGatewayOperationalState enumerates the values for application
    +// gateway operational state.
    +type ApplicationGatewayOperationalState string
    +
    +const (
    +	// Running specifies the running state for application gateway operational
    +	// state.
    +	Running ApplicationGatewayOperationalState = "Running"
    +	// Starting specifies the starting state for application gateway
    +	// operational state.
    +	Starting ApplicationGatewayOperationalState = "Starting"
    +	// Stopped specifies the stopped state for application gateway operational
    +	// state.
    +	Stopped ApplicationGatewayOperationalState = "Stopped"
    +	// Stopping specifies the stopping state for application gateway
    +	// operational state.
    +	Stopping ApplicationGatewayOperationalState = "Stopping"
    +)
    +
    +// ApplicationGatewayProtocol enumerates the values for application gateway
    +// protocol.
    +type ApplicationGatewayProtocol string
    +
    +const (
    +	// HTTP specifies the http state for application gateway protocol.
    +	HTTP ApplicationGatewayProtocol = "Http"
    +	// HTTPS specifies the https state for application gateway protocol.
    +	HTTPS ApplicationGatewayProtocol = "Https"
    +)
    +
    +// ApplicationGatewayRequestRoutingRuleType enumerates the values for
    +// application gateway request routing rule type.
    +type ApplicationGatewayRequestRoutingRuleType string
    +
    +const (
    +	// Basic specifies the basic state for application gateway request routing
    +	// rule type.
    +	Basic ApplicationGatewayRequestRoutingRuleType = "Basic"
    +	// PathBasedRouting specifies the path based routing state for application
    +	// gateway request routing rule type.
    +	PathBasedRouting ApplicationGatewayRequestRoutingRuleType = "PathBasedRouting"
    +)
    +
    +// ApplicationGatewaySkuName enumerates the values for application gateway sku
    +// name.
    +type ApplicationGatewaySkuName string
    +
    +const (
    +	// StandardLarge specifies the standard large state for application
    +	// gateway sku name.
    +	StandardLarge ApplicationGatewaySkuName = "Standard_Large"
    +	// StandardMedium specifies the standard medium state for application
    +	// gateway sku name.
    +	StandardMedium ApplicationGatewaySkuName = "Standard_Medium"
    +	// StandardSmall specifies the standard small state for application
    +	// gateway sku name.
    +	StandardSmall ApplicationGatewaySkuName = "Standard_Small"
    +	// WAFLarge specifies the waf large state for application gateway sku name.
    +	WAFLarge ApplicationGatewaySkuName = "WAF_Large"
    +	// WAFMedium specifies the waf medium state for application gateway sku
    +	// name.
    +	WAFMedium ApplicationGatewaySkuName = "WAF_Medium"
    +)
    +
    +// ApplicationGatewaySslProtocol enumerates the values for application gateway
    +// ssl protocol.
    +type ApplicationGatewaySslProtocol string
    +
    +const (
    +	// TLSv10 specifies the TLS 1.0 state for application gateway ssl
    +	// protocol.
    +	TLSv10 ApplicationGatewaySslProtocol = "TLSv1_0"
    +	// TLSv11 specifies the TLS 1.1 state for application gateway ssl
    +	// protocol.
    +	TLSv11 ApplicationGatewaySslProtocol = "TLSv1_1"
    +	// TLSv12 specifies the TLS 1.2 state for application gateway ssl
    +	// protocol.
    +	TLSv12 ApplicationGatewaySslProtocol = "TLSv1_2"
    +)
    +
    +// ApplicationGatewayTier enumerates the values for application gateway tier.
    +type ApplicationGatewayTier string
    +
    +const (
    +	// Standard specifies the standard state for application gateway tier.
    +	Standard ApplicationGatewayTier = "Standard"
    +	// WAF specifies the waf state for application gateway tier.
    +	WAF ApplicationGatewayTier = "WAF"
    +)
    +
    +// AuthorizationUseStatus enumerates the values for authorization use status.
    +type AuthorizationUseStatus string
    +
    +const (
    +	// Available specifies the available state for authorization use status.
    +	Available AuthorizationUseStatus = "Available"
    +	// InUse specifies the in use state for authorization use status.
    +	InUse AuthorizationUseStatus = "InUse"
    +)
    +
    +// EffectiveRouteSource enumerates the values for effective route source.
    +type EffectiveRouteSource string
    +
    +const (
    +	// EffectiveRouteSourceDefault specifies the effective route source
    +	// default state for effective route source.
    +	EffectiveRouteSourceDefault EffectiveRouteSource = "Default"
    +	// EffectiveRouteSourceUnknown specifies the effective route source
    +	// unknown state for effective route source.
    +	EffectiveRouteSourceUnknown EffectiveRouteSource = "Unknown"
    +	// EffectiveRouteSourceUser specifies the effective route source user
    +	// state for effective route source.
    +	EffectiveRouteSourceUser EffectiveRouteSource = "User"
    +	// EffectiveRouteSourceVirtualNetworkGateway specifies the effective route
    +	// source virtual network gateway state for effective route source.
    +	EffectiveRouteSourceVirtualNetworkGateway EffectiveRouteSource = "VirtualNetworkGateway"
    +)
    +
    +// EffectiveRouteState enumerates the values for effective route state.
    +type EffectiveRouteState string
    +
    +const (
    +	// Active specifies the active state for effective route state.
    +	Active EffectiveRouteState = "Active"
    +	// Invalid specifies the invalid state for effective route state.
    +	Invalid EffectiveRouteState = "Invalid"
    +)
    +
    +// ExpressRouteCircuitPeeringAdvertisedPublicPrefixState enumerates the values
    +// for express route circuit peering advertised public prefix state.
    +type ExpressRouteCircuitPeeringAdvertisedPublicPrefixState string
    +
    +const (
    +	// Configured specifies the configured state for express route circuit
    +	// peering advertised public prefix state.
    +	Configured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configured"
    +	// Configuring specifies the configuring state for express route circuit
    +	// peering advertised public prefix state.
    +	Configuring ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configuring"
    +	// NotConfigured specifies the not configured state for express route
    +	// circuit peering advertised public prefix state.
    +	NotConfigured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "NotConfigured"
    +	// ValidationNeeded specifies the validation needed state for express
    +	// route circuit peering advertised public prefix state.
    +	ValidationNeeded ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "ValidationNeeded"
    +)
    +
    +// ExpressRouteCircuitPeeringState enumerates the values for express route
    +// circuit peering state.
    +type ExpressRouteCircuitPeeringState string
    +
    +const (
    +	// ExpressRouteCircuitPeeringStateDisabled specifies the express route
    +	// circuit peering state disabled state for express route circuit peering
    +	// state.
    +	ExpressRouteCircuitPeeringStateDisabled ExpressRouteCircuitPeeringState = "Disabled"
    +	// ExpressRouteCircuitPeeringStateEnabled specifies the express route
    +	// circuit peering state enabled state for express route circuit peering
    +	// state.
    +	ExpressRouteCircuitPeeringStateEnabled ExpressRouteCircuitPeeringState = "Enabled"
    +)
    +
    +// ExpressRouteCircuitPeeringType enumerates the values for express route
    +// circuit peering type.
    +type ExpressRouteCircuitPeeringType string
    +
    +const (
    +	// AzurePrivatePeering specifies the azure private peering state for
    +	// express route circuit peering type.
    +	AzurePrivatePeering ExpressRouteCircuitPeeringType = "AzurePrivatePeering"
    +	// AzurePublicPeering specifies the azure public peering state for express
    +	// route circuit peering type.
    +	AzurePublicPeering ExpressRouteCircuitPeeringType = "AzurePublicPeering"
    +	// MicrosoftPeering specifies the microsoft peering state for express
    +	// route circuit peering type.
    +	MicrosoftPeering ExpressRouteCircuitPeeringType = "MicrosoftPeering"
    +)
    +
    +// ExpressRouteCircuitSkuFamily enumerates the values for express route
    +// circuit sku family.
    +type ExpressRouteCircuitSkuFamily string
    +
    +const (
    +	// MeteredData specifies the metered data state for express route circuit
    +	// sku family.
    +	MeteredData ExpressRouteCircuitSkuFamily = "MeteredData"
    +	// UnlimitedData specifies the unlimited data state for express route
    +	// circuit sku family.
    +	UnlimitedData ExpressRouteCircuitSkuFamily = "UnlimitedData"
    +)
    +
    +// ExpressRouteCircuitSkuTier enumerates the values for express route circuit
    +// sku tier.
    +type ExpressRouteCircuitSkuTier string
    +
    +const (
    +	// ExpressRouteCircuitSkuTierPremium specifies the express route circuit
    +	// sku tier premium state for express route circuit sku tier.
    +	ExpressRouteCircuitSkuTierPremium ExpressRouteCircuitSkuTier = "Premium"
    +	// ExpressRouteCircuitSkuTierStandard specifies the express route circuit
    +	// sku tier standard state for express route circuit sku tier.
    +	ExpressRouteCircuitSkuTierStandard ExpressRouteCircuitSkuTier = "Standard"
    +)
    +
    +// IPAllocationMethod enumerates the values for ip allocation method.
    +type IPAllocationMethod string
    +
    +const (
    +	// Dynamic specifies the dynamic state for ip allocation method.
    +	Dynamic IPAllocationMethod = "Dynamic"
    +	// Static specifies the static state for ip allocation method.
    +	Static IPAllocationMethod = "Static"
    +)
    +
    +// IPVersion enumerates the values for ip version.
    +type IPVersion string
    +
    +const (
    +	// IPv4 specifies the IPv4 state for ip version.
    +	IPv4 IPVersion = "IPv4"
    +	// IPv6 specifies the IPv6 state for ip version.
    +	IPv6 IPVersion = "IPv6"
    +)
    +
    +// LoadDistribution enumerates the values for load distribution.
    +type LoadDistribution string
    +
    +const (
    +	// Default specifies the default state for load distribution.
    +	Default LoadDistribution = "Default"
    +	// SourceIP specifies the source ip state for load distribution.
    +	SourceIP LoadDistribution = "SourceIP"
    +	// SourceIPProtocol specifies the source ip protocol state for load
    +	// distribution.
    +	SourceIPProtocol LoadDistribution = "SourceIPProtocol"
    +)
    +
    +// OperationStatus enumerates the values for operation status.
    +type OperationStatus string
    +
    +const (
    +	// Failed specifies the failed state for operation status.
    +	Failed OperationStatus = "Failed"
    +	// InProgress specifies the in progress state for operation status.
    +	InProgress OperationStatus = "InProgress"
    +	// Succeeded specifies the succeeded state for operation status.
    +	Succeeded OperationStatus = "Succeeded"
    +)
    +
    +// ProbeProtocol enumerates the values for probe protocol.
    +type ProbeProtocol string
    +
    +const (
    +	// ProbeProtocolHTTP specifies the probe protocol http state for probe
    +	// protocol.
    +	ProbeProtocolHTTP ProbeProtocol = "Http"
    +	// ProbeProtocolTCP specifies the probe protocol tcp state for probe
    +	// protocol.
    +	ProbeProtocolTCP ProbeProtocol = "Tcp"
    +)
    +
    +// ProcessorArchitecture enumerates the values for processor architecture.
    +type ProcessorArchitecture string
    +
    +const (
    +	// Amd64 specifies the amd64 state for processor architecture.
    +	Amd64 ProcessorArchitecture = "Amd64"
    +	// X86 specifies the x86 state for processor architecture.
    +	X86 ProcessorArchitecture = "X86"
    +)
    +
    +// RouteNextHopType enumerates the values for route next hop type.
    +type RouteNextHopType string
    +
    +const (
    +	// RouteNextHopTypeInternet specifies the route next hop type internet
    +	// state for route next hop type.
    +	RouteNextHopTypeInternet RouteNextHopType = "Internet"
    +	// RouteNextHopTypeNone specifies the route next hop type none state for
    +	// route next hop type.
    +	RouteNextHopTypeNone RouteNextHopType = "None"
    +	// RouteNextHopTypeVirtualAppliance specifies the route next hop type
    +	// virtual appliance state for route next hop type.
    +	RouteNextHopTypeVirtualAppliance RouteNextHopType = "VirtualAppliance"
    +	// RouteNextHopTypeVirtualNetworkGateway specifies the route next hop type
    +	// virtual network gateway state for route next hop type.
    +	RouteNextHopTypeVirtualNetworkGateway RouteNextHopType = "VirtualNetworkGateway"
    +	// RouteNextHopTypeVnetLocal specifies the route next hop type vnet local
    +	// state for route next hop type.
    +	RouteNextHopTypeVnetLocal RouteNextHopType = "VnetLocal"
    +)
    +
    +// SecurityRuleAccess enumerates the values for security rule access.
    +type SecurityRuleAccess string
    +
    +const (
    +	// Allow specifies the allow state for security rule access.
    +	Allow SecurityRuleAccess = "Allow"
    +	// Deny specifies the deny state for security rule access.
    +	Deny SecurityRuleAccess = "Deny"
    +)
    +
    +// SecurityRuleDirection enumerates the values for security rule direction.
    +type SecurityRuleDirection string
    +
    +const (
    +	// Inbound specifies the inbound state for security rule direction.
    +	Inbound SecurityRuleDirection = "Inbound"
    +	// Outbound specifies the outbound state for security rule direction.
    +	Outbound SecurityRuleDirection = "Outbound"
    +)
    +
    +// SecurityRuleProtocol enumerates the values for security rule protocol.
    +type SecurityRuleProtocol string
    +
    +const (
    +	// Asterisk specifies the asterisk state for security rule protocol.
    +	Asterisk SecurityRuleProtocol = "*"
    +	// TCP specifies the tcp state for security rule protocol.
    +	TCP SecurityRuleProtocol = "Tcp"
    +	// UDP specifies the udp state for security rule protocol.
    +	UDP SecurityRuleProtocol = "Udp"
    +)
    +
    +// ServiceProviderProvisioningState enumerates the values for service provider
    +// provisioning state.
    +type ServiceProviderProvisioningState string
    +
    +const (
    +	// Deprovisioning specifies the deprovisioning state for service provider
    +	// provisioning state.
    +	Deprovisioning ServiceProviderProvisioningState = "Deprovisioning"
    +	// NotProvisioned specifies the not provisioned state for service provider
    +	// provisioning state.
    +	NotProvisioned ServiceProviderProvisioningState = "NotProvisioned"
    +	// Provisioned specifies the provisioned state for service provider
    +	// provisioning state.
    +	Provisioned ServiceProviderProvisioningState = "Provisioned"
    +	// Provisioning specifies the provisioning state for service provider
    +	// provisioning state.
    +	Provisioning ServiceProviderProvisioningState = "Provisioning"
    +)
    +
    +// TransportProtocol enumerates the values for transport protocol.
    +type TransportProtocol string
    +
    +const (
    +	// TransportProtocolTCP specifies the transport protocol tcp state for
    +	// transport protocol.
    +	TransportProtocolTCP TransportProtocol = "Tcp"
    +	// TransportProtocolUDP specifies the transport protocol udp state for
    +	// transport protocol.
    +	TransportProtocolUDP TransportProtocol = "Udp"
    +)
    +
    +// VirtualNetworkGatewayConnectionStatus enumerates the values for virtual
    +// network gateway connection status.
    +type VirtualNetworkGatewayConnectionStatus string
    +
    +const (
    +	// VirtualNetworkGatewayConnectionStatusConnected specifies the virtual
    +	// network gateway connection status connected state for virtual network
    +	// gateway connection status.
    +	VirtualNetworkGatewayConnectionStatusConnected VirtualNetworkGatewayConnectionStatus = "Connected"
    +	// VirtualNetworkGatewayConnectionStatusConnecting specifies the virtual
    +	// network gateway connection status connecting state for virtual network
    +	// gateway connection status.
    +	VirtualNetworkGatewayConnectionStatusConnecting VirtualNetworkGatewayConnectionStatus = "Connecting"
    +	// VirtualNetworkGatewayConnectionStatusNotConnected specifies the virtual
    +	// network gateway connection status not connected state for virtual
    +	// network gateway connection status.
    +	VirtualNetworkGatewayConnectionStatusNotConnected VirtualNetworkGatewayConnectionStatus = "NotConnected"
    +	// VirtualNetworkGatewayConnectionStatusUnknown specifies the virtual
    +	// network gateway connection status unknown state for virtual network
    +	// gateway connection status.
    +	VirtualNetworkGatewayConnectionStatusUnknown VirtualNetworkGatewayConnectionStatus = "Unknown"
    +)
    +
    +// VirtualNetworkGatewayConnectionType enumerates the values for virtual
    +// network gateway connection type.
    +type VirtualNetworkGatewayConnectionType string
    +
    +const (
    +	// ExpressRoute specifies the express route state for virtual network
    +	// gateway connection type.
    +	ExpressRoute VirtualNetworkGatewayConnectionType = "ExpressRoute"
    +	// IPsec specifies the IPsec state for virtual network gateway connection
    +	// type.
    +	IPsec VirtualNetworkGatewayConnectionType = "IPsec"
    +	// Vnet2Vnet specifies the Vnet2Vnet state for virtual network gateway
    +	// connection type.
    +	Vnet2Vnet VirtualNetworkGatewayConnectionType = "Vnet2Vnet"
    +	// VPNClient specifies the vpn client state for virtual network gateway
    +	// connection type.
    +	VPNClient VirtualNetworkGatewayConnectionType = "VPNClient"
    +)
    +
    +// VirtualNetworkGatewaySkuName enumerates the values for virtual network
    +// gateway sku name.
    +type VirtualNetworkGatewaySkuName string
    +
    +const (
    +	// VirtualNetworkGatewaySkuNameBasic specifies the virtual network gateway
    +	// sku name basic state for virtual network gateway sku name.
    +	VirtualNetworkGatewaySkuNameBasic VirtualNetworkGatewaySkuName = "Basic"
    +	// VirtualNetworkGatewaySkuNameHighPerformance specifies the virtual
    +	// network gateway sku name high performance state for virtual network
    +	// gateway sku name.
    +	VirtualNetworkGatewaySkuNameHighPerformance VirtualNetworkGatewaySkuName = "HighPerformance"
    +	// VirtualNetworkGatewaySkuNameStandard specifies the virtual network
    +	// gateway sku name standard state for virtual network gateway sku name.
    +	VirtualNetworkGatewaySkuNameStandard VirtualNetworkGatewaySkuName = "Standard"
    +	// VirtualNetworkGatewaySkuNameUltraPerformance specifies the virtual
    +	// network gateway sku name ultra performance state for virtual network
    +	// gateway sku name.
    +	VirtualNetworkGatewaySkuNameUltraPerformance VirtualNetworkGatewaySkuName = "UltraPerformance"
    +)
    +
    +// VirtualNetworkGatewaySkuTier enumerates the values for virtual network
    +// gateway sku tier.
    +type VirtualNetworkGatewaySkuTier string
    +
    +const (
    +	// VirtualNetworkGatewaySkuTierBasic specifies the virtual network gateway
    +	// sku tier basic state for virtual network gateway sku tier.
    +	VirtualNetworkGatewaySkuTierBasic VirtualNetworkGatewaySkuTier = "Basic"
    +	// VirtualNetworkGatewaySkuTierHighPerformance specifies the virtual
    +	// network gateway sku tier high performance state for virtual network
    +	// gateway sku tier.
    +	VirtualNetworkGatewaySkuTierHighPerformance VirtualNetworkGatewaySkuTier = "HighPerformance"
    +	// VirtualNetworkGatewaySkuTierStandard specifies the virtual network
    +	// gateway sku tier standard state for virtual network gateway sku tier.
    +	VirtualNetworkGatewaySkuTierStandard VirtualNetworkGatewaySkuTier = "Standard"
    +	// VirtualNetworkGatewaySkuTierUltraPerformance specifies the virtual
    +	// network gateway sku tier ultra performance state for virtual network
    +	// gateway sku tier.
    +	VirtualNetworkGatewaySkuTierUltraPerformance VirtualNetworkGatewaySkuTier = "UltraPerformance"
    +)
    +
    +// VirtualNetworkGatewayType enumerates the values for virtual network gateway
    +// type.
    +type VirtualNetworkGatewayType string
    +
    +const (
    +	// VirtualNetworkGatewayTypeExpressRoute specifies the virtual network
    +	// gateway type express route state for virtual network gateway type.
    +	VirtualNetworkGatewayTypeExpressRoute VirtualNetworkGatewayType = "ExpressRoute"
    +	// VirtualNetworkGatewayTypeVpn specifies the virtual network gateway type
    +	// vpn state for virtual network gateway type.
    +	VirtualNetworkGatewayTypeVpn VirtualNetworkGatewayType = "Vpn"
    +)
    +
    +// VirtualNetworkPeeringState enumerates the values for virtual network
    +// peering state.
    +type VirtualNetworkPeeringState string
    +
    +const (
    +	// Connected specifies the connected state for virtual network peering
    +	// state.
    +	Connected VirtualNetworkPeeringState = "Connected"
    +	// Disconnected specifies the disconnected state for virtual network
    +	// peering state.
    +	Disconnected VirtualNetworkPeeringState = "Disconnected"
    +	// Initiated specifies the initiated state for virtual network peering
    +	// state.
    +	Initiated VirtualNetworkPeeringState = "Initiated"
    +)
    +
    +// VpnType enumerates the values for vpn type.
    +type VpnType string
    +
    +const (
    +	// PolicyBased specifies the policy based state for vpn type.
    +	PolicyBased VpnType = "PolicyBased"
    +	// RouteBased specifies the route based state for vpn type.
    +	RouteBased VpnType = "RouteBased"
    +)
    +
    +// AddressSpace is addressSpace contains an array of IP address ranges that
    +// can be used by subnets
    +type AddressSpace struct {
    +	AddressPrefixes *[]string `json:"addressPrefixes,omitempty"`
    +}
    +
    +// ApplicationGateway is applicationGateways resource
    +type ApplicationGateway struct {
    +	autorest.Response `json:"-"` // embedded raw HTTP response; excluded from JSON (de)serialization
    +	ID                *string                             `json:"id,omitempty"`
    +	Name              *string                             `json:"name,omitempty"`
    +	Type              *string                             `json:"type,omitempty"`
    +	Location          *string                             `json:"location,omitempty"`
    +	Tags              *map[string]*string                 `json:"tags,omitempty"` // NOTE(review): pointer-to-map/pointer-to-string is the generator's idiom — presumably to distinguish unset from empty; do not "simplify" by hand
    +	Properties        *ApplicationGatewayPropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                             `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayAuthenticationCertificate is authentication certificates
    +// of application gateway
    +type ApplicationGatewayAuthenticationCertificate struct {
    +	ID         *string                                                      `json:"id,omitempty"`
    +	Properties *ApplicationGatewayAuthenticationCertificatePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                                      `json:"name,omitempty"`
    +	Etag       *string                                                      `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayAuthenticationCertificatePropertiesFormat is properties
    +// of Authentication certificates of application gateway
    +type ApplicationGatewayAuthenticationCertificatePropertiesFormat struct {
    +	Data              *string `json:"data,omitempty"`
    +	ProvisioningState *string `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendAddress is backend Address of application gateway
    +type ApplicationGatewayBackendAddress struct {
    +	Fqdn      *string `json:"fqdn,omitempty"`
    +	IPAddress *string `json:"ipAddress,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendAddressPool is backend Address Pool of application
    +// gateway
    +type ApplicationGatewayBackendAddressPool struct {
    +	ID         *string                                               `json:"id,omitempty"`
    +	Properties *ApplicationGatewayBackendAddressPoolPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                               `json:"name,omitempty"`
    +	Etag       *string                                               `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendAddressPoolPropertiesFormat is properties of
    +// Backend Address Pool of application gateway
    +type ApplicationGatewayBackendAddressPoolPropertiesFormat struct {
    +	BackendIPConfigurations *[]InterfaceIPConfiguration         `json:"backendIPConfigurations,omitempty"`
    +	BackendAddresses        *[]ApplicationGatewayBackendAddress `json:"backendAddresses,omitempty"`
    +	ProvisioningState       *string                             `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendHealth is list of backendhealth pools.
    +type ApplicationGatewayBackendHealth struct {
    +	autorest.Response   `json:"-"`
    +	BackendAddressPools *[]ApplicationGatewayBackendHealthPool `json:"backendAddressPools,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendHealthHTTPSettings is application gateway
    +// backendhealth http settings.
    +type ApplicationGatewayBackendHealthHTTPSettings struct {
    +	BackendHTTPSettings *ApplicationGatewayBackendHTTPSettings   `json:"backendHttpSettings,omitempty"`
    +	Servers             *[]ApplicationGatewayBackendHealthServer `json:"servers,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendHealthPool is application gateway backendhealth
    +// pool.
    +type ApplicationGatewayBackendHealthPool struct {
    +	BackendAddressPool            *ApplicationGatewayBackendAddressPool          `json:"backendAddressPool,omitempty"`
    +	BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHealthHTTPSettings `json:"backendHttpSettingsCollection,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendHealthServer is application gateway backendhealth
    +// http settings.
    +type ApplicationGatewayBackendHealthServer struct {
    +	Address         *string                                     `json:"address,omitempty"`
    +	IPConfiguration *SubResource                                `json:"ipConfiguration,omitempty"`
    +	Health          ApplicationGatewayBackendHealthServerHealth `json:"health,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendHTTPSettings is backend address pool settings of
    +// application gateway
    +type ApplicationGatewayBackendHTTPSettings struct {
    +	ID         *string                                                `json:"id,omitempty"`
    +	Properties *ApplicationGatewayBackendHTTPSettingsPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                                `json:"name,omitempty"`
    +	Etag       *string                                                `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of
    +// Backend address pool settings of application gateway
    +type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct {
    +	Port                       *int32                                `json:"port,omitempty"`
    +	Protocol                   ApplicationGatewayProtocol            `json:"protocol,omitempty"`
    +	CookieBasedAffinity        ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"`
    +	RequestTimeout             *int32                                `json:"requestTimeout,omitempty"`
    +	Probe                      *SubResource                          `json:"probe,omitempty"`
    +	AuthenticationCertificates *[]SubResource                        `json:"authenticationCertificates,omitempty"`
    +	ProvisioningState          *string                               `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of
    +// application gateway
    +type ApplicationGatewayFrontendIPConfiguration struct {
    +	ID         *string                                                    `json:"id,omitempty"`
    +	Properties *ApplicationGatewayFrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                                    `json:"name,omitempty"`
    +	Etag       *string                                                    `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayFrontendIPConfigurationPropertiesFormat is properties of
    +// Frontend IP configuration of application gateway
    +type ApplicationGatewayFrontendIPConfigurationPropertiesFormat struct {
    +	PrivateIPAddress          *string            `json:"privateIPAddress,omitempty"`
    +	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
    +	Subnet                    *SubResource       `json:"subnet,omitempty"`
    +	PublicIPAddress           *SubResource       `json:"publicIPAddress,omitempty"`
    +	ProvisioningState         *string            `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayFrontendPort is frontend Port of application gateway
    +type ApplicationGatewayFrontendPort struct {
    +	ID         *string                                         `json:"id,omitempty"`
    +	Properties *ApplicationGatewayFrontendPortPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                         `json:"name,omitempty"`
    +	Etag       *string                                         `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayFrontendPortPropertiesFormat is properties of Frontend
    +// Port of application gateway
    +type ApplicationGatewayFrontendPortPropertiesFormat struct {
    +	Port              *int32  `json:"port,omitempty"`
    +	ProvisioningState *string `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayHTTPListener is HTTP listener of application gateway
    +type ApplicationGatewayHTTPListener struct {
    +	ID         *string                                         `json:"id,omitempty"`
    +	Properties *ApplicationGatewayHTTPListenerPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                         `json:"name,omitempty"`
    +	Etag       *string                                         `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayHTTPListenerPropertiesFormat is properties of HTTP
    +// listener of application gateway
    +type ApplicationGatewayHTTPListenerPropertiesFormat struct {
    +	FrontendIPConfiguration     *SubResource               `json:"frontendIPConfiguration,omitempty"`
    +	FrontendPort                *SubResource               `json:"frontendPort,omitempty"`
    +	Protocol                    ApplicationGatewayProtocol `json:"protocol,omitempty"`
    +	HostName                    *string                    `json:"hostName,omitempty"`
    +	SslCertificate              *SubResource               `json:"sslCertificate,omitempty"`
    +	RequireServerNameIndication *bool                      `json:"requireServerNameIndication,omitempty"`
    +	ProvisioningState           *string                    `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayIPConfiguration is IP configuration of application gateway
    +type ApplicationGatewayIPConfiguration struct {
    +	ID         *string                                            `json:"id,omitempty"`
    +	Properties *ApplicationGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                            `json:"name,omitempty"`
    +	Etag       *string                                            `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayIPConfigurationPropertiesFormat is properties of IP
    +// configuration of application gateway
    +type ApplicationGatewayIPConfigurationPropertiesFormat struct {
    +	Subnet            *SubResource `json:"subnet,omitempty"`
    +	ProvisioningState *string      `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayListResult is response for ListApplicationGateways Api
    +// service call
    +type ApplicationGatewayListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ApplicationGateway `json:"value,omitempty"`
    +	NextLink          *string               `json:"nextLink,omitempty"`
    +}
    +
    +// ApplicationGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client ApplicationGatewayListResult) ApplicationGatewayListResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
    +// ApplicationGatewayPathRule is path rule of URL path map of application
    +// gateway
    +type ApplicationGatewayPathRule struct {
    +	ID         *string                                     `json:"id,omitempty"`
    +	Properties *ApplicationGatewayPathRulePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                     `json:"name,omitempty"`
    +	Etag       *string                                     `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayPathRulePropertiesFormat is properties of path rule of
    +// URL path map of application gateway
    +type ApplicationGatewayPathRulePropertiesFormat struct {
    +	Paths               *[]string    `json:"paths,omitempty"`
    +	BackendAddressPool  *SubResource `json:"backendAddressPool,omitempty"`
    +	BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"`
    +	ProvisioningState   *string      `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayProbe is probe of application gateway
    +type ApplicationGatewayProbe struct {
    +	ID         *string                                  `json:"id,omitempty"`
    +	Properties *ApplicationGatewayProbePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                  `json:"name,omitempty"`
    +	Etag       *string                                  `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayProbePropertiesFormat is properties of probe of
    +// application gateway
    +type ApplicationGatewayProbePropertiesFormat struct {
    +	Protocol           ApplicationGatewayProtocol `json:"protocol,omitempty"`
    +	Host               *string                    `json:"host,omitempty"`
    +	Path               *string                    `json:"path,omitempty"`
    +	Interval           *int32                     `json:"interval,omitempty"`
    +	Timeout            *int32                     `json:"timeout,omitempty"`
    +	UnhealthyThreshold *int32                     `json:"unhealthyThreshold,omitempty"`
    +	ProvisioningState  *string                    `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayPropertiesFormat is properties of Application Gateway
    +type ApplicationGatewayPropertiesFormat struct {
    +	Sku                                 *ApplicationGatewaySku                                 `json:"sku,omitempty"`
    +	SslPolicy                           *ApplicationGatewaySslPolicy                           `json:"sslPolicy,omitempty"`
    +	OperationalState                    ApplicationGatewayOperationalState                     `json:"operationalState,omitempty"`
    +	GatewayIPConfigurations             *[]ApplicationGatewayIPConfiguration                   `json:"gatewayIPConfigurations,omitempty"`
    +	AuthenticationCertificates          *[]ApplicationGatewayAuthenticationCertificate         `json:"authenticationCertificates,omitempty"`
    +	SslCertificates                     *[]ApplicationGatewaySslCertificate                    `json:"sslCertificates,omitempty"`
    +	FrontendIPConfigurations            *[]ApplicationGatewayFrontendIPConfiguration           `json:"frontendIPConfigurations,omitempty"`
    +	FrontendPorts                       *[]ApplicationGatewayFrontendPort                      `json:"frontendPorts,omitempty"`
    +	Probes                              *[]ApplicationGatewayProbe                             `json:"probes,omitempty"`
    +	BackendAddressPools                 *[]ApplicationGatewayBackendAddressPool                `json:"backendAddressPools,omitempty"`
    +	BackendHTTPSettingsCollection       *[]ApplicationGatewayBackendHTTPSettings               `json:"backendHttpSettingsCollection,omitempty"`
    +	HTTPListeners                       *[]ApplicationGatewayHTTPListener                      `json:"httpListeners,omitempty"`
    +	URLPathMaps                         *[]ApplicationGatewayURLPathMap                        `json:"urlPathMaps,omitempty"`
    +	RequestRoutingRules                 *[]ApplicationGatewayRequestRoutingRule                `json:"requestRoutingRules,omitempty"`
    +	WebApplicationFirewallConfiguration *ApplicationGatewayWebApplicationFirewallConfiguration `json:"webApplicationFirewallConfiguration,omitempty"`
    +	ResourceGUID                        *string                                                `json:"resourceGuid,omitempty"`
    +	ProvisioningState                   *string                                                `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayRequestRoutingRule is request routing rule of application
    +// gateway
    +type ApplicationGatewayRequestRoutingRule struct {
    +	ID         *string                                               `json:"id,omitempty"`
    +	Properties *ApplicationGatewayRequestRoutingRulePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                               `json:"name,omitempty"`
    +	Etag       *string                                               `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayRequestRoutingRulePropertiesFormat is properties of
    +// Request routing rule of application gateway
    +type ApplicationGatewayRequestRoutingRulePropertiesFormat struct {
    +	RuleType            ApplicationGatewayRequestRoutingRuleType `json:"ruleType,omitempty"`
    +	BackendAddressPool  *SubResource                             `json:"backendAddressPool,omitempty"`
    +	BackendHTTPSettings *SubResource                             `json:"backendHttpSettings,omitempty"`
    +	HTTPListener        *SubResource                             `json:"httpListener,omitempty"`
    +	URLPathMap          *SubResource                             `json:"urlPathMap,omitempty"`
    +	ProvisioningState   *string                                  `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewaySku is SKU of application gateway
    +type ApplicationGatewaySku struct {
    +	Name     ApplicationGatewaySkuName `json:"name,omitempty"`
    +	Tier     ApplicationGatewayTier    `json:"tier,omitempty"`
    +	Capacity *int32                    `json:"capacity,omitempty"`
    +}
    +
    +// ApplicationGatewaySslCertificate is SSL certificates of application gateway
    +type ApplicationGatewaySslCertificate struct {
    +	ID         *string                                           `json:"id,omitempty"`
    +	Properties *ApplicationGatewaySslCertificatePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                           `json:"name,omitempty"`
    +	Etag       *string                                           `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewaySslCertificatePropertiesFormat is properties of SSL
    +// certificates of application gateway
    +type ApplicationGatewaySslCertificatePropertiesFormat struct {
    +	Data              *string `json:"data,omitempty"`
    +	Password          *string `json:"password,omitempty"`
    +	PublicCertData    *string `json:"publicCertData,omitempty"`
    +	ProvisioningState *string `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewaySslPolicy is application gateway SSL policy
    +type ApplicationGatewaySslPolicy struct {
    +	DisabledSslProtocols *[]ApplicationGatewaySslProtocol `json:"disabledSslProtocols,omitempty"`
    +}
    +
    +// ApplicationGatewayURLPathMap is URL path map of application gateway
    +type ApplicationGatewayURLPathMap struct {
    +	ID         *string                                       `json:"id,omitempty"`
    +	Properties *ApplicationGatewayURLPathMapPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                       `json:"name,omitempty"`
    +	Etag       *string                                       `json:"etag,omitempty"`
    +}
    +
    +// ApplicationGatewayURLPathMapPropertiesFormat is properties of UrlPathMap of
    +// application gateway
    +type ApplicationGatewayURLPathMapPropertiesFormat struct {
    +	DefaultBackendAddressPool  *SubResource                  `json:"defaultBackendAddressPool,omitempty"`
    +	DefaultBackendHTTPSettings *SubResource                  `json:"defaultBackendHttpSettings,omitempty"`
    +	PathRules                  *[]ApplicationGatewayPathRule `json:"pathRules,omitempty"`
    +	ProvisioningState          *string                       `json:"provisioningState,omitempty"`
    +}
    +
    +// ApplicationGatewayWebApplicationFirewallConfiguration is application
    +// gateway web application firewall configuration
    +type ApplicationGatewayWebApplicationFirewallConfiguration struct {
    +	Enabled      *bool                          `json:"enabled,omitempty"`
    +	FirewallMode ApplicationGatewayFirewallMode `json:"firewallMode,omitempty"`
    +}
    +
    +// AuthorizationListResult is response for ListAuthorizations Api service
    +// call. Retrieves all authorizations that belong to an ExpressRouteCircuit
    +type AuthorizationListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ExpressRouteCircuitAuthorization `json:"value,omitempty"`
    +	NextLink          *string                             `json:"nextLink,omitempty"`
    +}
    +
    +// AuthorizationListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client AuthorizationListResult) AuthorizationListResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
    +// AuthorizationPropertiesFormat is
    +type AuthorizationPropertiesFormat struct {
    +	AuthorizationKey       *string                `json:"authorizationKey,omitempty"`
    +	AuthorizationUseStatus AuthorizationUseStatus `json:"authorizationUseStatus,omitempty"`
    +	ProvisioningState      *string                `json:"provisioningState,omitempty"`
    +}
    +
    +// AzureAsyncOperationResult is the response body contains the status of the
    +// specified asynchronous operation, indicating whether it has succeeded, is
    +// in progress, or has failed. Note that this status is distinct from the
    +// HTTP status code returned for the Get Operation Status operation itself.
    +// If the asynchronous operation succeeded, the response body includes the
    +// HTTP status code for the successful request. If the asynchronous operation
    +// failed, the response body includes the HTTP status code for the failed
    +// request and error information regarding the failure.
    +type AzureAsyncOperationResult struct {
    +	Status OperationStatus `json:"status,omitempty"`
    +	Error  *Error          `json:"error,omitempty"`
    +}
    +
    +// BackendAddressPool is pool of backend IP addresses
    +type BackendAddressPool struct {
    +	ID         *string                             `json:"id,omitempty"`
    +	Properties *BackendAddressPoolPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                             `json:"name,omitempty"`
    +	Etag       *string                             `json:"etag,omitempty"`
    +}
    +
    +// BackendAddressPoolPropertiesFormat is properties of BackendAddressPool
    +type BackendAddressPoolPropertiesFormat struct {
    +	BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"`
    +	LoadBalancingRules      *[]SubResource              `json:"loadBalancingRules,omitempty"`
    +	OutboundNatRule         *SubResource                `json:"outboundNatRule,omitempty"`
    +	ProvisioningState       *string                     `json:"provisioningState,omitempty"`
    +}
    +
    +// BgpSettings is
    +type BgpSettings struct {
    +	Asn               *int64  `json:"asn,omitempty"`
    +	BgpPeeringAddress *string `json:"bgpPeeringAddress,omitempty"`
    +	PeerWeight        *int32  `json:"peerWeight,omitempty"`
    +}
    +
    +// ConnectionResetSharedKey is
    +type ConnectionResetSharedKey struct {
    +	autorest.Response `json:"-"`
    +	KeyLength         *int64 `json:"keyLength,omitempty"`
    +}
    +
    +// ConnectionSharedKey is response for GetConnectionSharedKey Api service call
    +type ConnectionSharedKey struct {
    +	autorest.Response `json:"-"`
    +	Value             *string `json:"value,omitempty"`
    +}
    +
    +// ConnectionSharedKeyResult is response for CheckConnectionSharedKey Api
    +// service call
    +type ConnectionSharedKeyResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *string `json:"value,omitempty"`
    +}
    +
    +// DhcpOptions contains an array of DNS servers available to VMs deployed in
    +// the virtual network. Standard DHCP options for a subnet override VNET
    +// DHCP options.
    +type DhcpOptions struct {
    +	DNSServers *[]string `json:"dnsServers,omitempty"`
    +}
    +
    +// DNSNameAvailabilityResult is response for CheckDnsNameAvailability Api
    +// service call
    +type DNSNameAvailabilityResult struct {
    +	autorest.Response `json:"-"`
    +	Available         *bool `json:"available,omitempty"`
    +}
    +
    +// EffectiveNetworkSecurityGroup is effective NetworkSecurityGroup
    +type EffectiveNetworkSecurityGroup struct {
    +	NetworkSecurityGroup   *SubResource                              `json:"networkSecurityGroup,omitempty"`
    +	Association            *EffectiveNetworkSecurityGroupAssociation `json:"association,omitempty"`
    +	EffectiveSecurityRules *[]EffectiveNetworkSecurityRule           `json:"effectiveSecurityRules,omitempty"`
    +}
    +
    +// EffectiveNetworkSecurityGroupAssociation is effective NetworkSecurityGroup
    +// association
    +type EffectiveNetworkSecurityGroupAssociation struct {
    +	Subnet           *SubResource `json:"subnet,omitempty"`
    +	NetworkInterface *SubResource `json:"networkInterface,omitempty"`
    +}
    +
    +// EffectiveNetworkSecurityGroupListResult is response for list effective
    +// network security groups api service call
    +type EffectiveNetworkSecurityGroupListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]EffectiveNetworkSecurityGroup `json:"value,omitempty"`
    +	NextLink          *string                          `json:"nextLink,omitempty"`
    +}
    +
    +// EffectiveNetworkSecurityRule is effective NetworkSecurityRules
    +type EffectiveNetworkSecurityRule struct {
    +	Name                             *string               `json:"name,omitempty"`
    +	Protocol                         SecurityRuleProtocol  `json:"protocol,omitempty"`
    +	SourcePortRange                  *string               `json:"sourcePortRange,omitempty"`
    +	DestinationPortRange             *string               `json:"destinationPortRange,omitempty"`
    +	SourceAddressPrefix              *string               `json:"sourceAddressPrefix,omitempty"`
    +	DestinationAddressPrefix         *string               `json:"destinationAddressPrefix,omitempty"`
    +	ExpandedSourceAddressPrefix      *[]string             `json:"expandedSourceAddressPrefix,omitempty"`
    +	ExpandedDestinationAddressPrefix *[]string             `json:"expandedDestinationAddressPrefix,omitempty"`
    +	Access                           SecurityRuleAccess    `json:"access,omitempty"`
    +	Priority                         *int32                `json:"priority,omitempty"`
    +	Direction                        SecurityRuleDirection `json:"direction,omitempty"`
    +}
    +
    +// EffectiveRoute is effective Route
    +type EffectiveRoute struct {
    +	Name             *string              `json:"name,omitempty"`
    +	Source           EffectiveRouteSource `json:"source,omitempty"`
    +	State            EffectiveRouteState  `json:"state,omitempty"`
    +	AddressPrefix    *[]string            `json:"addressPrefix,omitempty"`
    +	NextHopIPAddress *[]string            `json:"nextHopIpAddress,omitempty"`
    +	NextHopType      RouteNextHopType     `json:"nextHopType,omitempty"`
    +}
    +
    +// EffectiveRouteListResult is response for list effective route api service
    +// call
    +type EffectiveRouteListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]EffectiveRoute `json:"value,omitempty"`
    +	NextLink          *string           `json:"nextLink,omitempty"`
    +}
    +
    +// Error is
    +type Error struct {
    +	Code       *string         `json:"code,omitempty"`
    +	Message    *string         `json:"message,omitempty"`
    +	Target     *string         `json:"target,omitempty"`
    +	Details    *[]ErrorDetails `json:"details,omitempty"`
    +	InnerError *string         `json:"innerError,omitempty"`
    +}
    +
    +// ErrorDetails is
    +type ErrorDetails struct {
    +	Code    *string `json:"code,omitempty"`
    +	Target  *string `json:"target,omitempty"`
    +	Message *string `json:"message,omitempty"`
    +}
    +
    +// ExpressRouteCircuit is expressRouteCircuit resource
    +type ExpressRouteCircuit struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                              `json:"id,omitempty"`
    +	Name              *string                              `json:"name,omitempty"`
    +	Type              *string                              `json:"type,omitempty"`
    +	Location          *string                              `json:"location,omitempty"`
    +	Tags              *map[string]*string                  `json:"tags,omitempty"`
    +	Sku               *ExpressRouteCircuitSku              `json:"sku,omitempty"`
    +	Properties        *ExpressRouteCircuitPropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                              `json:"etag,omitempty"`
    +}
    +
    +// ExpressRouteCircuitArpTable is the arp table associated with the
    +// ExpressRouteCircuit
    +type ExpressRouteCircuitArpTable struct {
    +	Age        *int32  `json:"age,omitempty"`
    +	Interface  *string `json:"interface,omitempty"`
    +	IPAddress  *string `json:"ipAddress,omitempty"`
    +	MacAddress *string `json:"macAddress,omitempty"`
    +}
    +
    +// ExpressRouteCircuitAuthorization is authorization in a ExpressRouteCircuit
    +// resource
    +type ExpressRouteCircuitAuthorization struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                        `json:"id,omitempty"`
    +	Properties        *AuthorizationPropertiesFormat `json:"properties,omitempty"`
    +	Name              *string                        `json:"name,omitempty"`
    +	Etag              *string                        `json:"etag,omitempty"`
    +}
    +
    +// ExpressRouteCircuitListResult is response for ListExpressRouteCircuit Api
    +// service call
    +type ExpressRouteCircuitListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ExpressRouteCircuit `json:"value,omitempty"`
    +	NextLink          *string                `json:"nextLink,omitempty"`
    +}
    +
    +// ExpressRouteCircuitListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client ExpressRouteCircuitListResult) ExpressRouteCircuitListResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
    +// ExpressRouteCircuitPeering is peering in a ExpressRouteCircuit resource
    +type ExpressRouteCircuitPeering struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                                     `json:"id,omitempty"`
    +	Properties        *ExpressRouteCircuitPeeringPropertiesFormat `json:"properties,omitempty"`
    +	Name              *string                                     `json:"name,omitempty"`
    +	Etag              *string                                     `json:"etag,omitempty"`
    +}
    +
    +// ExpressRouteCircuitPeeringConfig specifies the peering config
    +type ExpressRouteCircuitPeeringConfig struct {
    +	AdvertisedPublicPrefixes      *[]string                                             `json:"advertisedPublicPrefixes,omitempty"`
    +	AdvertisedPublicPrefixesState ExpressRouteCircuitPeeringAdvertisedPublicPrefixState `json:"advertisedPublicPrefixesState,omitempty"`
    +	CustomerASN                   *int32                                                `json:"customerASN,omitempty"`
    +	RoutingRegistryName           *string                                               `json:"routingRegistryName,omitempty"`
    +}
    +
    +// ExpressRouteCircuitPeeringListResult is response for ListPeering Api
    +// service call. Retrieves all peerings that belong to an ExpressRouteCircuit
    +type ExpressRouteCircuitPeeringListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ExpressRouteCircuitPeering `json:"value,omitempty"`
    +	NextLink          *string                       `json:"nextLink,omitempty"`
    +}
    +
    +// ExpressRouteCircuitPeeringListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client ExpressRouteCircuitPeeringListResult) ExpressRouteCircuitPeeringListResultPreparer() (*http.Request, error) {
    +	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
    +		return nil, nil
    +	}
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(to.String(client.NextLink)))
    +}
    +
    +// ExpressRouteCircuitPeeringPropertiesFormat is
    +type ExpressRouteCircuitPeeringPropertiesFormat struct {
    +	PeeringType                ExpressRouteCircuitPeeringType    `json:"peeringType,omitempty"`
    +	State                      ExpressRouteCircuitPeeringState   `json:"state,omitempty"`
    +	AzureASN                   *int32                            `json:"azureASN,omitempty"`
    +	PeerASN                    *int32                            `json:"peerASN,omitempty"`
    +	PrimaryPeerAddressPrefix   *string                           `json:"primaryPeerAddressPrefix,omitempty"`
    +	SecondaryPeerAddressPrefix *string                           `json:"secondaryPeerAddressPrefix,omitempty"`
    +	PrimaryAzurePort           *string                           `json:"primaryAzurePort,omitempty"`
    +	SecondaryAzurePort         *string                           `json:"secondaryAzurePort,omitempty"`
    +	SharedKey                  *string                           `json:"sharedKey,omitempty"`
    +	VlanID                     *int32                            `json:"vlanId,omitempty"`
    +	MicrosoftPeeringConfig     *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"`
    +	Stats                      *ExpressRouteCircuitStats         `json:"stats,omitempty"`
    +	ProvisioningState          *string                           `json:"provisioningState,omitempty"`
    +	GatewayManagerEtag         *string                           `json:"gatewayManagerEtag,omitempty"`
    +	LastModifiedBy             *string                           `json:"lastModifiedBy,omitempty"`
    +}
    +
    +// ExpressRouteCircuitPropertiesFormat is properties of ExpressRouteCircuit
    +type ExpressRouteCircuitPropertiesFormat struct {
    +	AllowClassicOperations           *bool                                         `json:"allowClassicOperations,omitempty"`
    +	CircuitProvisioningState         *string                                       `json:"circuitProvisioningState,omitempty"`
    +	ServiceProviderProvisioningState ServiceProviderProvisioningState              `json:"serviceProviderProvisioningState,omitempty"`
    +	Authorizations                   *[]ExpressRouteCircuitAuthorization           `json:"authorizations,omitempty"`
    +	Peerings                         *[]ExpressRouteCircuitPeering                 `json:"peerings,omitempty"`
    +	ServiceKey                       *string                                       `json:"serviceKey,omitempty"`
    +	ServiceProviderNotes             *string                                       `json:"serviceProviderNotes,omitempty"`
    +	ServiceProviderProperties        *ExpressRouteCircuitServiceProviderProperties `json:"serviceProviderProperties,omitempty"`
    +	ProvisioningState                *string                                       `json:"provisioningState,omitempty"`
    +	GatewayManagerEtag               *string                                       `json:"gatewayManagerEtag,omitempty"`
    +}
    +
    +// ExpressRouteCircuitRoutesTable is the routes table associated with the
    +// ExpressRouteCircuit
    +type ExpressRouteCircuitRoutesTable struct {
    +	Network *string `json:"network,omitempty"`
    +	NextHop *string `json:"nextHop,omitempty"`
    +	LocPrf  *string `json:"locPrf,omitempty"`
    +	Weight  *int32  `json:"weight,omitempty"`
    +	Path    *string `json:"path,omitempty"`
    +}
    +
    +// ExpressRouteCircuitRoutesTableSummary is the routes table associated with
    +// the ExpressRouteCircuit
    +type ExpressRouteCircuitRoutesTableSummary struct {
    +	Neighbor    *string `json:"neighbor,omitempty"`
    +	V           *int32  `json:"v,omitempty"`
    +	As          *int32  `json:"as,omitempty"`
    +	UpDown      *string `json:"upDown,omitempty"`
    +	StatePfxRcd *string `json:"statePfxRcd,omitempty"`
    +}
    +
    +// ExpressRouteCircuitsArpTableListResult is the response for the ListArpTable
    +// operation associated with the Express Route Circuits API.
    +type ExpressRouteCircuitsArpTableListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ExpressRouteCircuitArpTable `json:"value,omitempty"`
    +	NextLink          *string                        `json:"nextLink,omitempty"`
    +}
    +
    +// ExpressRouteCircuitServiceProviderProperties contains the
    +// ServiceProviderProperties in an ExpressRouteCircuit.
    +type ExpressRouteCircuitServiceProviderProperties struct {
    +	ServiceProviderName *string `json:"serviceProviderName,omitempty"`
    +	PeeringLocation     *string `json:"peeringLocation,omitempty"`
    +	BandwidthInMbps     *int32  `json:"bandwidthInMbps,omitempty"`
    +}
    +
    +// ExpressRouteCircuitSku contains the SKU in an ExpressRouteCircuit.
    +type ExpressRouteCircuitSku struct {
    +	Name   *string                      `json:"name,omitempty"`
    +	Tier   ExpressRouteCircuitSkuTier   `json:"tier,omitempty"`
    +	Family ExpressRouteCircuitSkuFamily `json:"family,omitempty"`
    +}
    +
    +// ExpressRouteCircuitsRoutesTableListResult is the response for the
    +// ListRoutesTable operation associated with the Express Route Circuits API.
    +type ExpressRouteCircuitsRoutesTableListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ExpressRouteCircuitRoutesTable `json:"value,omitempty"`
    +	NextLink          *string                           `json:"nextLink,omitempty"`
    +}
    +
    +// ExpressRouteCircuitsRoutesTableSummaryListResult is the response for the
    +// routes-table summary listing associated with the Express Route Circuits
    +// API (the generated comment said "ListRoutesTable"; presumably
    +// ListRoutesTableSummary — confirm against the client code).
    +type ExpressRouteCircuitsRoutesTableSummaryListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ExpressRouteCircuitRoutesTableSummary `json:"value,omitempty"`
    +	NextLink          *string                                  `json:"nextLink,omitempty"`
    +}
    +
    +// ExpressRouteCircuitStats contains stats associated with the peering.
    +type ExpressRouteCircuitStats struct {
    +	autorest.Response `json:"-"`
    +	PrimarybytesIn    *int64 `json:"primarybytesIn,omitempty"`
    +	PrimarybytesOut   *int64 `json:"primarybytesOut,omitempty"`
    +	SecondarybytesIn  *int64 `json:"secondarybytesIn,omitempty"`
    +	SecondarybytesOut *int64 `json:"secondarybytesOut,omitempty"`
    +}
    +
    +// ExpressRouteServiceProvider is an ExpressRouteResourceProvider object.
    +type ExpressRouteServiceProvider struct {
    +	ID         *string                                      `json:"id,omitempty"`
    +	Name       *string                                      `json:"name,omitempty"`
    +	Type       *string                                      `json:"type,omitempty"`
    +	Location   *string                                      `json:"location,omitempty"`
    +	Tags       *map[string]*string                          `json:"tags,omitempty"`
    +	Properties *ExpressRouteServiceProviderPropertiesFormat `json:"properties,omitempty"`
    +}
    +
    +// ExpressRouteServiceProviderBandwidthsOffered contains the bandwidths
    +// offered in ExpressRouteServiceProviders.
    +type ExpressRouteServiceProviderBandwidthsOffered struct {
    +	OfferName   *string `json:"offerName,omitempty"`
    +	ValueInMbps *int32  `json:"valueInMbps,omitempty"`
    +}
    +
    +// ExpressRouteServiceProviderListResult is the response for the
    +// ListExpressRouteServiceProvider API service call.
    +type ExpressRouteServiceProviderListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]ExpressRouteServiceProvider `json:"value,omitempty"`
    +	NextLink          *string                        `json:"nextLink,omitempty"`
    +}
    +
    +// ExpressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client ExpressRouteServiceProviderListResult) ExpressRouteServiceProviderListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// ExpressRouteServiceProviderPropertiesFormat describes the properties of an
    +// ExpressRouteServiceProvider.
    +type ExpressRouteServiceProviderPropertiesFormat struct {
    +	PeeringLocations  *[]string                                       `json:"peeringLocations,omitempty"`
    +	BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"`
    +	ProvisioningState *string                                         `json:"provisioningState,omitempty"`
    +}
    +
    +// FrontendIPConfiguration is a frontend IP address of the load balancer.
    +type FrontendIPConfiguration struct {
    +	ID         *string                                  `json:"id,omitempty"`
    +	Properties *FrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                  `json:"name,omitempty"`
    +	Etag       *string                                  `json:"etag,omitempty"`
    +}
    +
    +// FrontendIPConfigurationPropertiesFormat describes the properties of a
    +// frontend IP configuration of the load balancer.
    +type FrontendIPConfigurationPropertiesFormat struct {
    +	InboundNatRules           *[]SubResource     `json:"inboundNatRules,omitempty"`
    +	InboundNatPools           *[]SubResource     `json:"inboundNatPools,omitempty"`
    +	OutboundNatRules          *[]SubResource     `json:"outboundNatRules,omitempty"`
    +	LoadBalancingRules        *[]SubResource     `json:"loadBalancingRules,omitempty"`
    +	PrivateIPAddress          *string            `json:"privateIPAddress,omitempty"`
    +	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
    +	Subnet                    *Subnet            `json:"subnet,omitempty"`
    +	PublicIPAddress           *PublicIPAddress   `json:"publicIPAddress,omitempty"`
    +	ProvisioningState         *string            `json:"provisioningState,omitempty"`
    +}
    +
    +// InboundNatPool is an inbound NAT pool of the load balancer.
    +type InboundNatPool struct {
    +	ID         *string                         `json:"id,omitempty"`
    +	Properties *InboundNatPoolPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                         `json:"name,omitempty"`
    +	Etag       *string                         `json:"etag,omitempty"`
    +}
    +
    +// InboundNatPoolPropertiesFormat describes the properties of an inbound NAT
    +// pool.
    +type InboundNatPoolPropertiesFormat struct {
    +	FrontendIPConfiguration *SubResource      `json:"frontendIPConfiguration,omitempty"`
    +	Protocol                TransportProtocol `json:"protocol,omitempty"`
    +	FrontendPortRangeStart  *int32            `json:"frontendPortRangeStart,omitempty"`
    +	FrontendPortRangeEnd    *int32            `json:"frontendPortRangeEnd,omitempty"`
    +	BackendPort             *int32            `json:"backendPort,omitempty"`
    +	ProvisioningState       *string           `json:"provisioningState,omitempty"`
    +}
    +
    +// InboundNatRule is an inbound NAT rule of the load balancer.
    +type InboundNatRule struct {
    +	ID         *string                         `json:"id,omitempty"`
    +	Properties *InboundNatRulePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                         `json:"name,omitempty"`
    +	Etag       *string                         `json:"etag,omitempty"`
    +}
    +
    +// InboundNatRulePropertiesFormat describes the properties of an inbound NAT
    +// rule.
    +type InboundNatRulePropertiesFormat struct {
    +	FrontendIPConfiguration *SubResource              `json:"frontendIPConfiguration,omitempty"`
    +	BackendIPConfiguration  *InterfaceIPConfiguration `json:"backendIPConfiguration,omitempty"`
    +	Protocol                TransportProtocol         `json:"protocol,omitempty"`
    +	FrontendPort            *int32                    `json:"frontendPort,omitempty"`
    +	BackendPort             *int32                    `json:"backendPort,omitempty"`
    +	IdleTimeoutInMinutes    *int32                    `json:"idleTimeoutInMinutes,omitempty"`
    +	EnableFloatingIP        *bool                     `json:"enableFloatingIP,omitempty"`
    +	ProvisioningState       *string                   `json:"provisioningState,omitempty"`
    +}
    +
    +// Interface is a NetworkInterface in a resource group.
    +type Interface struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                    `json:"id,omitempty"`
    +	Name              *string                    `json:"name,omitempty"`
    +	Type              *string                    `json:"type,omitempty"`
    +	Location          *string                    `json:"location,omitempty"`
    +	Tags              *map[string]*string        `json:"tags,omitempty"`
    +	Properties        *InterfacePropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                    `json:"etag,omitempty"`
    +}
    +
    +// InterfaceDNSSettings holds the DNS settings of a network interface.
    +type InterfaceDNSSettings struct {
    +	DNSServers               *[]string `json:"dnsServers,omitempty"`
    +	AppliedDNSServers        *[]string `json:"appliedDnsServers,omitempty"`
    +	InternalDNSNameLabel     *string   `json:"internalDnsNameLabel,omitempty"`
    +	InternalFqdn             *string   `json:"internalFqdn,omitempty"`
    +	InternalDomainNameSuffix *string   `json:"internalDomainNameSuffix,omitempty"`
    +}
    +
    +// InterfaceIPConfiguration is an IPConfiguration in a NetworkInterface.
    +type InterfaceIPConfiguration struct {
    +	ID         *string                                   `json:"id,omitempty"`
    +	Properties *InterfaceIPConfigurationPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                                   `json:"name,omitempty"`
    +	Etag       *string                                   `json:"etag,omitempty"`
    +}
    +
    +// InterfaceIPConfigurationPropertiesFormat describes the properties of an
    +// IPConfiguration.
    +type InterfaceIPConfigurationPropertiesFormat struct {
    +	ApplicationGatewayBackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"applicationGatewayBackendAddressPools,omitempty"`
    +	LoadBalancerBackendAddressPools       *[]BackendAddressPool                   `json:"loadBalancerBackendAddressPools,omitempty"`
    +	LoadBalancerInboundNatRules           *[]InboundNatRule                       `json:"loadBalancerInboundNatRules,omitempty"`
    +	PrivateIPAddress                      *string                                 `json:"privateIPAddress,omitempty"`
    +	PrivateIPAllocationMethod             IPAllocationMethod                      `json:"privateIPAllocationMethod,omitempty"`
    +	PrivateIPAddressVersion               IPVersion                               `json:"privateIPAddressVersion,omitempty"`
    +	Subnet                                *Subnet                                 `json:"subnet,omitempty"`
    +	Primary                               *bool                                   `json:"primary,omitempty"`
    +	PublicIPAddress                       *PublicIPAddress                        `json:"publicIPAddress,omitempty"`
    +	ProvisioningState                     *string                                 `json:"provisioningState,omitempty"`
    +}
    +
    +// InterfaceListResult is the response for the ListNetworkInterface API
    +// service call.
    +type InterfaceListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]Interface `json:"value,omitempty"`
    +	NextLink          *string      `json:"nextLink,omitempty"`
    +}
    +
    +// InterfaceListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client InterfaceListResult) InterfaceListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// InterfacePropertiesFormat describes NetworkInterface properties.
    +type InterfacePropertiesFormat struct {
    +	VirtualMachine              *SubResource                `json:"virtualMachine,omitempty"`
    +	NetworkSecurityGroup        *SecurityGroup              `json:"networkSecurityGroup,omitempty"`
    +	IPConfigurations            *[]InterfaceIPConfiguration `json:"ipConfigurations,omitempty"`
    +	DNSSettings                 *InterfaceDNSSettings       `json:"dnsSettings,omitempty"`
    +	MacAddress                  *string                     `json:"macAddress,omitempty"`
    +	Primary                     *bool                       `json:"primary,omitempty"`
    +	EnableAcceleratedNetworking *bool                       `json:"enableAcceleratedNetworking,omitempty"`
    +	EnableIPForwarding          *bool                       `json:"enableIPForwarding,omitempty"`
    +	ResourceGUID                *string                     `json:"resourceGuid,omitempty"`
    +	ProvisioningState           *string                     `json:"provisioningState,omitempty"`
    +}
    +
    +// IPAddressAvailabilityResult is the response for the
    +// CheckIPAddressAvailability API service call.
    +type IPAddressAvailabilityResult struct {
    +	autorest.Response    `json:"-"`
    +	Available            *bool     `json:"available,omitempty"`
    +	AvailableIPAddresses *[]string `json:"availableIPAddresses,omitempty"`
    +}
    +
    +// IPConfiguration is an IP configuration.
    +type IPConfiguration struct {
    +	ID         *string                          `json:"id,omitempty"`
    +	Properties *IPConfigurationPropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                          `json:"name,omitempty"`
    +	Etag       *string                          `json:"etag,omitempty"`
    +}
    +
    +// IPConfigurationPropertiesFormat describes the properties of an
    +// IPConfiguration.
    +type IPConfigurationPropertiesFormat struct {
    +	PrivateIPAddress          *string            `json:"privateIPAddress,omitempty"`
    +	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
    +	Subnet                    *Subnet            `json:"subnet,omitempty"`
    +	PublicIPAddress           *PublicIPAddress   `json:"publicIPAddress,omitempty"`
    +	ProvisioningState         *string            `json:"provisioningState,omitempty"`
    +}
    +
    +// LoadBalancer is a load balancer resource.
    +type LoadBalancer struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                       `json:"id,omitempty"`
    +	Name              *string                       `json:"name,omitempty"`
    +	Type              *string                       `json:"type,omitempty"`
    +	Location          *string                       `json:"location,omitempty"`
    +	Tags              *map[string]*string           `json:"tags,omitempty"`
    +	Properties        *LoadBalancerPropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                       `json:"etag,omitempty"`
    +}
    +
    +// LoadBalancerListResult is the response for the ListLoadBalancers API
    +// service call.
    +type LoadBalancerListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]LoadBalancer `json:"value,omitempty"`
    +	NextLink          *string         `json:"nextLink,omitempty"`
    +}
    +
    +// LoadBalancerListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client LoadBalancerListResult) LoadBalancerListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// LoadBalancerPropertiesFormat describes the properties of a load balancer.
    +type LoadBalancerPropertiesFormat struct {
    +	FrontendIPConfigurations *[]FrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"`
    +	BackendAddressPools      *[]BackendAddressPool      `json:"backendAddressPools,omitempty"`
    +	LoadBalancingRules       *[]LoadBalancingRule       `json:"loadBalancingRules,omitempty"`
    +	Probes                   *[]Probe                   `json:"probes,omitempty"`
    +	InboundNatRules          *[]InboundNatRule          `json:"inboundNatRules,omitempty"`
    +	InboundNatPools          *[]InboundNatPool          `json:"inboundNatPools,omitempty"`
    +	OutboundNatRules         *[]OutboundNatRule         `json:"outboundNatRules,omitempty"`
    +	ResourceGUID             *string                    `json:"resourceGuid,omitempty"`
    +	ProvisioningState        *string                    `json:"provisioningState,omitempty"`
    +}
    +
    +// LoadBalancingRule is a rule of the load balancer.
    +type LoadBalancingRule struct {
    +	ID         *string                            `json:"id,omitempty"`
    +	Properties *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                            `json:"name,omitempty"`
    +	Etag       *string                            `json:"etag,omitempty"`
    +}
    +
    +// LoadBalancingRulePropertiesFormat describes the properties of a load
    +// balancing rule.
    +type LoadBalancingRulePropertiesFormat struct {
    +	FrontendIPConfiguration *SubResource      `json:"frontendIPConfiguration,omitempty"`
    +	BackendAddressPool      *SubResource      `json:"backendAddressPool,omitempty"`
    +	Probe                   *SubResource      `json:"probe,omitempty"`
    +	Protocol                TransportProtocol `json:"protocol,omitempty"`
    +	LoadDistribution        LoadDistribution  `json:"loadDistribution,omitempty"`
    +	FrontendPort            *int32            `json:"frontendPort,omitempty"`
    +	BackendPort             *int32            `json:"backendPort,omitempty"`
    +	IdleTimeoutInMinutes    *int32            `json:"idleTimeoutInMinutes,omitempty"`
    +	EnableFloatingIP        *bool             `json:"enableFloatingIP,omitempty"`
    +	ProvisioningState       *string           `json:"provisioningState,omitempty"`
    +}
    +
    +// LocalNetworkGateway is a common class for general resource information.
    +type LocalNetworkGateway struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                              `json:"id,omitempty"`
    +	Name              *string                              `json:"name,omitempty"`
    +	Type              *string                              `json:"type,omitempty"`
    +	Location          *string                              `json:"location,omitempty"`
    +	Tags              *map[string]*string                  `json:"tags,omitempty"`
    +	Properties        *LocalNetworkGatewayPropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                              `json:"etag,omitempty"`
    +}
    +
    +// LocalNetworkGatewayListResult is the response for the
    +// ListLocalNetworkGateways API service call.
    +type LocalNetworkGatewayListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]LocalNetworkGateway `json:"value,omitempty"`
    +	NextLink          *string                `json:"nextLink,omitempty"`
    +}
    +
    +// LocalNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client LocalNetworkGatewayListResult) LocalNetworkGatewayListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// LocalNetworkGatewayPropertiesFormat describes LocalNetworkGateway
    +// properties.
    +type LocalNetworkGatewayPropertiesFormat struct {
    +	LocalNetworkAddressSpace *AddressSpace `json:"localNetworkAddressSpace,omitempty"`
    +	GatewayIPAddress         *string       `json:"gatewayIpAddress,omitempty"`
    +	BgpSettings              *BgpSettings  `json:"bgpSettings,omitempty"`
    +	ResourceGUID             *string       `json:"resourceGuid,omitempty"`
    +	ProvisioningState        *string       `json:"provisioningState,omitempty"`
    +}
    +
    +// OutboundNatRule is an outbound NAT pool of the load balancer.
    +type OutboundNatRule struct {
    +	ID         *string                          `json:"id,omitempty"`
    +	Properties *OutboundNatRulePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                          `json:"name,omitempty"`
    +	Etag       *string                          `json:"etag,omitempty"`
    +}
    +
    +// OutboundNatRulePropertiesFormat describes an outbound NAT pool of the load
    +// balancer.
    +type OutboundNatRulePropertiesFormat struct {
    +	AllocatedOutboundPorts   *int32         `json:"allocatedOutboundPorts,omitempty"`
    +	FrontendIPConfigurations *[]SubResource `json:"frontendIPConfigurations,omitempty"`
    +	BackendAddressPool       *SubResource   `json:"backendAddressPool,omitempty"`
    +	ProvisioningState        *string        `json:"provisioningState,omitempty"`
    +}
    +
    +// Probe is a load balancer probe.
    +type Probe struct {
    +	ID         *string                `json:"id,omitempty"`
    +	Properties *ProbePropertiesFormat `json:"properties,omitempty"`
    +	Name       *string                `json:"name,omitempty"`
    +	Etag       *string                `json:"etag,omitempty"`
    +}
    +
    +// ProbePropertiesFormat describes the properties of a load balancer probe:
    +// protocol, port, probing interval, probe count, and request path.
    +type ProbePropertiesFormat struct {
    +	LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"`
    +	Protocol           ProbeProtocol  `json:"protocol,omitempty"`
    +	Port               *int32         `json:"port,omitempty"`
    +	IntervalInSeconds  *int32         `json:"intervalInSeconds,omitempty"`
    +	NumberOfProbes     *int32         `json:"numberOfProbes,omitempty"`
    +	RequestPath        *string        `json:"requestPath,omitempty"`
    +	ProvisioningState  *string        `json:"provisioningState,omitempty"`
    +}
    +
    +// PublicIPAddress is a PublicIPAddress resource.
    +type PublicIPAddress struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                          `json:"id,omitempty"`
    +	Name              *string                          `json:"name,omitempty"`
    +	Type              *string                          `json:"type,omitempty"`
    +	Location          *string                          `json:"location,omitempty"`
    +	Tags              *map[string]*string              `json:"tags,omitempty"`
    +	Properties        *PublicIPAddressPropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                          `json:"etag,omitempty"`
    +}
    +
    +// PublicIPAddressDNSSettings contains the FQDN of the DNS record associated
    +// with the public IP address.
    +type PublicIPAddressDNSSettings struct {
    +	DomainNameLabel *string `json:"domainNameLabel,omitempty"`
    +	Fqdn            *string `json:"fqdn,omitempty"`
    +	ReverseFqdn     *string `json:"reverseFqdn,omitempty"`
    +}
    +
    +// PublicIPAddressListResult is the response for the ListPublicIpAddresses API
    +// service call.
    +type PublicIPAddressListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]PublicIPAddress `json:"value,omitempty"`
    +	NextLink          *string            `json:"nextLink,omitempty"`
    +}
    +
    +// PublicIPAddressListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client PublicIPAddressListResult) PublicIPAddressListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// PublicIPAddressPropertiesFormat describes PublicIpAddress properties.
    +type PublicIPAddressPropertiesFormat struct {
    +	PublicIPAllocationMethod IPAllocationMethod          `json:"publicIPAllocationMethod,omitempty"`
    +	PublicIPAddressVersion   IPVersion                   `json:"publicIPAddressVersion,omitempty"`
    +	IPConfiguration          *IPConfiguration            `json:"ipConfiguration,omitempty"`
    +	DNSSettings              *PublicIPAddressDNSSettings `json:"dnsSettings,omitempty"`
    +	IPAddress                *string                     `json:"ipAddress,omitempty"`
    +	IdleTimeoutInMinutes     *int32                      `json:"idleTimeoutInMinutes,omitempty"`
    +	ResourceGUID             *string                     `json:"resourceGuid,omitempty"`
    +	ProvisioningState        *string                     `json:"provisioningState,omitempty"`
    +}
    +
    +// Resource carries the identification fields common to Azure resources
    +// (ID, name, type, location, and tags).
    +type Resource struct {
    +	ID       *string             `json:"id,omitempty"`
    +	Name     *string             `json:"name,omitempty"`
    +	Type     *string             `json:"type,omitempty"`
    +	Location *string             `json:"location,omitempty"`
    +	Tags     *map[string]*string `json:"tags,omitempty"`
    +}
    +
    +// ResourceNavigationLink is a ResourceNavigationLink resource.
    +type ResourceNavigationLink struct {
    +	ID         *string                       `json:"id,omitempty"`
    +	Properties *ResourceNavigationLinkFormat `json:"properties,omitempty"`
    +	Name       *string                       `json:"name,omitempty"`
    +	Etag       *string                       `json:"etag,omitempty"`
    +}
    +
    +// ResourceNavigationLinkFormat describes the properties of a
    +// ResourceNavigationLink.
    +type ResourceNavigationLinkFormat struct {
    +	LinkedResourceType *string `json:"linkedResourceType,omitempty"`
    +	Link               *string `json:"link,omitempty"`
    +	ProvisioningState  *string `json:"provisioningState,omitempty"`
    +}
    +
    +// Route is a route resource.
    +type Route struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                `json:"id,omitempty"`
    +	Properties        *RoutePropertiesFormat `json:"properties,omitempty"`
    +	Name              *string                `json:"name,omitempty"`
    +	Etag              *string                `json:"etag,omitempty"`
    +}
    +
    +// RouteListResult is the response for the ListRoute API service call.
    +type RouteListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]Route `json:"value,omitempty"`
    +	NextLink          *string  `json:"nextLink,omitempty"`
    +}
    +
    +// RouteListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client RouteListResult) RouteListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// RoutePropertiesFormat describes the properties of a route resource.
    +type RoutePropertiesFormat struct {
    +	AddressPrefix     *string          `json:"addressPrefix,omitempty"`
    +	NextHopType       RouteNextHopType `json:"nextHopType,omitempty"`
    +	NextHopIPAddress  *string          `json:"nextHopIpAddress,omitempty"`
    +	ProvisioningState *string          `json:"provisioningState,omitempty"`
    +}
    +
    +// RouteTable is a RouteTable resource.
    +type RouteTable struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                     `json:"id,omitempty"`
    +	Name              *string                     `json:"name,omitempty"`
    +	Type              *string                     `json:"type,omitempty"`
    +	Location          *string                     `json:"location,omitempty"`
    +	Tags              *map[string]*string         `json:"tags,omitempty"`
    +	Properties        *RouteTablePropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                     `json:"etag,omitempty"`
    +}
    +
    +// RouteTableListResult is the response for the ListRouteTable API service
    +// call.
    +type RouteTableListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]RouteTable `json:"value,omitempty"`
    +	NextLink          *string       `json:"nextLink,omitempty"`
    +}
    +
    +// RouteTableListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client RouteTableListResult) RouteTableListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// RouteTablePropertiesFormat describes the properties of a route table
    +// resource.
    +type RouteTablePropertiesFormat struct {
    +	Routes            *[]Route  `json:"routes,omitempty"`
    +	Subnets           *[]Subnet `json:"subnets,omitempty"`
    +	ProvisioningState *string   `json:"provisioningState,omitempty"`
    +}
    +
    +// SecurityGroup is a NetworkSecurityGroup resource.
    +type SecurityGroup struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                        `json:"id,omitempty"`
    +	Name              *string                        `json:"name,omitempty"`
    +	Type              *string                        `json:"type,omitempty"`
    +	Location          *string                        `json:"location,omitempty"`
    +	Tags              *map[string]*string            `json:"tags,omitempty"`
    +	Properties        *SecurityGroupPropertiesFormat `json:"properties,omitempty"`
    +	Etag              *string                        `json:"etag,omitempty"`
    +}
    +
    +// SecurityGroupListResult is the response for the ListNetworkSecurityGroups
    +// API service call.
    +type SecurityGroupListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]SecurityGroup `json:"value,omitempty"`
    +	NextLink          *string          `json:"nextLink,omitempty"`
    +}
    +
    +// SecurityGroupListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client SecurityGroupListResult) SecurityGroupListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
    +// SecurityGroupPropertiesFormat describes a network security group resource.
    +type SecurityGroupPropertiesFormat struct {
    +	SecurityRules        *[]SecurityRule `json:"securityRules,omitempty"`
    +	DefaultSecurityRules *[]SecurityRule `json:"defaultSecurityRules,omitempty"`
    +	NetworkInterfaces    *[]Interface    `json:"networkInterfaces,omitempty"`
    +	Subnets              *[]Subnet       `json:"subnets,omitempty"`
    +	ResourceGUID         *string         `json:"resourceGuid,omitempty"`
    +	ProvisioningState    *string         `json:"provisioningState,omitempty"`
    +}
    +
    +// SecurityRule is a network security rule.
    +type SecurityRule struct {
    +	autorest.Response `json:"-"`
    +	ID                *string                       `json:"id,omitempty"`
    +	Properties        *SecurityRulePropertiesFormat `json:"properties,omitempty"`
    +	Name              *string                       `json:"name,omitempty"`
    +	Etag              *string                       `json:"etag,omitempty"`
    +}
    +
    +// SecurityRuleListResult is the response for the ListSecurityRule API service
    +// call; it retrieves all security rules that belong to a network security
    +// group.
    +type SecurityRuleListResult struct {
    +	autorest.Response `json:"-"`
    +	Value             *[]SecurityRule `json:"value,omitempty"`
    +	NextLink          *string         `json:"nextLink,omitempty"`
    +}
    +
    +// SecurityRuleListResultPreparer prepares a request to retrieve the next set of results. It returns
    +// nil if no more results exist.
    +func (client SecurityRuleListResult) SecurityRuleListResultPreparer() (*http.Request, error) {
    +	// No continuation link at all: paging is finished.
    +	if client.NextLink == nil {
    +		return nil, nil
    +	}
    +	nextLink := to.String(client.NextLink)
    +	if nextLink == "" {
    +		return nil, nil
    +	}
    +	// Build a GET request against the continuation URL.
    +	return autorest.Prepare(&http.Request{},
    +		autorest.AsJSON(),
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(nextLink))
    +}
    +
+// SecurityRulePropertiesFormat is the set of properties of a security rule.
+type SecurityRulePropertiesFormat struct {
+	Description              *string               `json:"description,omitempty"`
+	Protocol                 SecurityRuleProtocol  `json:"protocol,omitempty"`
+	SourcePortRange          *string               `json:"sourcePortRange,omitempty"`
+	DestinationPortRange     *string               `json:"destinationPortRange,omitempty"`
+	SourceAddressPrefix      *string               `json:"sourceAddressPrefix,omitempty"`
+	DestinationAddressPrefix *string               `json:"destinationAddressPrefix,omitempty"`
+	Access                   SecurityRuleAccess    `json:"access,omitempty"`
+	Priority                 *int32                `json:"priority,omitempty"`
+	Direction                SecurityRuleDirection `json:"direction,omitempty"`
+	ProvisioningState        *string               `json:"provisioningState,omitempty"`
+}
+
+// String is a response wrapper around a single string value.
+type String struct {
+	autorest.Response `json:"-"`
+	Value             *string `json:"value,omitempty"`
+}
    +
+// Subnet is a subnet in a VirtualNetwork resource.
+type Subnet struct {
+	autorest.Response `json:"-"`
+	ID                *string                 `json:"id,omitempty"`
+	Properties        *SubnetPropertiesFormat `json:"properties,omitempty"`
+	Name              *string                 `json:"name,omitempty"`
+	Etag              *string                 `json:"etag,omitempty"`
+}
+
+// SubnetListResult is the response for the ListSubnets API service call.
+// It lists all subnets that belong to a virtual network.
+type SubnetListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Subnet `json:"value,omitempty"`
+	NextLink          *string   `json:"nextLink,omitempty"`
+}
+
+// SubnetListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client SubnetListResult) SubnetListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// SubnetPropertiesFormat is the set of properties of a subnet.
+type SubnetPropertiesFormat struct {
+	AddressPrefix           *string                   `json:"addressPrefix,omitempty"`
+	NetworkSecurityGroup    *SecurityGroup            `json:"networkSecurityGroup,omitempty"`
+	RouteTable              *RouteTable               `json:"routeTable,omitempty"`
+	IPConfigurations        *[]IPConfiguration        `json:"ipConfigurations,omitempty"`
+	ResourceNavigationLinks *[]ResourceNavigationLink `json:"resourceNavigationLinks,omitempty"`
+	ProvisioningState       *string                   `json:"provisioningState,omitempty"`
+}
    +
+// SubResource is a reference to another resource by ID.
+type SubResource struct {
+	ID *string `json:"id,omitempty"`
+}
+
+// Usage describes network resource usage.
+type Usage struct {
+	Unit         *string    `json:"unit,omitempty"`
+	CurrentValue *int64     `json:"currentValue,omitempty"`
+	Limit        *int64     `json:"limit,omitempty"`
+	Name         *UsageName `json:"name,omitempty"`
+}
+
+// UsageName is the usage name (raw and localized forms).
+type UsageName struct {
+	Value          *string `json:"value,omitempty"`
+	LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// UsagesListResult is the List Usages operation response.
+type UsagesListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Usage `json:"value,omitempty"`
+	NextLink          *string  `json:"nextLink,omitempty"`
+}
+
+// UsagesListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client UsagesListResult) UsagesListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
    +
+// VirtualNetwork is a virtual network resource.
+type VirtualNetwork struct {
+	autorest.Response `json:"-"`
+	ID                *string                         `json:"id,omitempty"`
+	Name              *string                         `json:"name,omitempty"`
+	Type              *string                         `json:"type,omitempty"`
+	Location          *string                         `json:"location,omitempty"`
+	Tags              *map[string]*string             `json:"tags,omitempty"`
+	Properties        *VirtualNetworkPropertiesFormat `json:"properties,omitempty"`
+	Etag              *string                         `json:"etag,omitempty"`
+}
+
+// VirtualNetworkGateway is a common class for general resource information.
+type VirtualNetworkGateway struct {
+	autorest.Response `json:"-"`
+	ID                *string                                `json:"id,omitempty"`
+	Name              *string                                `json:"name,omitempty"`
+	Type              *string                                `json:"type,omitempty"`
+	Location          *string                                `json:"location,omitempty"`
+	Tags              *map[string]*string                    `json:"tags,omitempty"`
+	Properties        *VirtualNetworkGatewayPropertiesFormat `json:"properties,omitempty"`
+	Etag              *string                                `json:"etag,omitempty"`
+}
+
+// VirtualNetworkGatewayConnection is a common class for general resource
+// information.
+type VirtualNetworkGatewayConnection struct {
+	autorest.Response `json:"-"`
+	ID                *string                                          `json:"id,omitempty"`
+	Name              *string                                          `json:"name,omitempty"`
+	Type              *string                                          `json:"type,omitempty"`
+	Location          *string                                          `json:"location,omitempty"`
+	Tags              *map[string]*string                              `json:"tags,omitempty"`
+	Properties        *VirtualNetworkGatewayConnectionPropertiesFormat `json:"properties,omitempty"`
+	Etag              *string                                          `json:"etag,omitempty"`
+}
+
+// VirtualNetworkGatewayConnectionListResult is the response for the
+// ListVirtualNetworkGatewayConnections API service call.
+type VirtualNetworkGatewayConnectionListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]VirtualNetworkGatewayConnection `json:"value,omitempty"`
+	NextLink          *string                            `json:"nextLink,omitempty"`
+}
+
+// VirtualNetworkGatewayConnectionListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client VirtualNetworkGatewayConnectionListResult) VirtualNetworkGatewayConnectionListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
    +
+// VirtualNetworkGatewayConnectionPropertiesFormat is the set of
+// VirtualNetworkGatewayConnection properties.
+type VirtualNetworkGatewayConnectionPropertiesFormat struct {
+	AuthorizationKey        *string                               `json:"authorizationKey,omitempty"`
+	VirtualNetworkGateway1  *VirtualNetworkGateway                `json:"virtualNetworkGateway1,omitempty"`
+	VirtualNetworkGateway2  *VirtualNetworkGateway                `json:"virtualNetworkGateway2,omitempty"`
+	LocalNetworkGateway2    *LocalNetworkGateway                  `json:"localNetworkGateway2,omitempty"`
+	ConnectionType          VirtualNetworkGatewayConnectionType   `json:"connectionType,omitempty"`
+	RoutingWeight           *int32                                `json:"routingWeight,omitempty"`
+	SharedKey               *string                               `json:"sharedKey,omitempty"`
+	ConnectionStatus        VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"`
+	EgressBytesTransferred  *int64                                `json:"egressBytesTransferred,omitempty"`
+	IngressBytesTransferred *int64                                `json:"ingressBytesTransferred,omitempty"`
+	Peer                    *SubResource                          `json:"peer,omitempty"`
+	EnableBgp               *bool                                 `json:"enableBgp,omitempty"`
+	ResourceGUID            *string                               `json:"resourceGuid,omitempty"`
+	ProvisioningState       *string                               `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkGatewayIPConfiguration is an IP configuration for a virtual
+// network gateway.
+type VirtualNetworkGatewayIPConfiguration struct {
+	ID         *string                                               `json:"id,omitempty"`
+	Properties *VirtualNetworkGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                               `json:"name,omitempty"`
+	Etag       *string                                               `json:"etag,omitempty"`
+}
+
+// VirtualNetworkGatewayIPConfigurationPropertiesFormat is the set of properties
+// of VirtualNetworkGatewayIPConfiguration.
+type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct {
+	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
+	Subnet                    *SubResource       `json:"subnet,omitempty"`
+	PublicIPAddress           *SubResource       `json:"publicIPAddress,omitempty"`
+	ProvisioningState         *string            `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkGatewayListResult is the response for the
+// ListVirtualNetworkGateways API service call.
+type VirtualNetworkGatewayListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]VirtualNetworkGateway `json:"value,omitempty"`
+	NextLink          *string                  `json:"nextLink,omitempty"`
+}
+
+// VirtualNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client VirtualNetworkGatewayListResult) VirtualNetworkGatewayListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
    +
+// VirtualNetworkGatewayPropertiesFormat is the set of VirtualNetworkGateway properties.
+type VirtualNetworkGatewayPropertiesFormat struct {
+	IPConfigurations       *[]VirtualNetworkGatewayIPConfiguration `json:"ipConfigurations,omitempty"`
+	GatewayType            VirtualNetworkGatewayType               `json:"gatewayType,omitempty"`
+	VpnType                VpnType                                 `json:"vpnType,omitempty"`
+	EnableBgp              *bool                                   `json:"enableBgp,omitempty"`
+	ActiveActive           *bool                                   `json:"activeActive,omitempty"`
+	GatewayDefaultSite     *SubResource                            `json:"gatewayDefaultSite,omitempty"`
+	Sku                    *VirtualNetworkGatewaySku               `json:"sku,omitempty"`
+	VpnClientConfiguration *VpnClientConfiguration                 `json:"vpnClientConfiguration,omitempty"`
+	BgpSettings            *BgpSettings                            `json:"bgpSettings,omitempty"`
+	ResourceGUID           *string                                 `json:"resourceGuid,omitempty"`
+	ProvisioningState      *string                                 `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkGatewaySku holds VirtualNetworkGatewaySku details.
+type VirtualNetworkGatewaySku struct {
+	Name     VirtualNetworkGatewaySkuName `json:"name,omitempty"`
+	Tier     VirtualNetworkGatewaySkuTier `json:"tier,omitempty"`
+	Capacity *int32                       `json:"capacity,omitempty"`
+}
+
+// VirtualNetworkListResult is the response for the ListVirtualNetworks API
+// service call.
+type VirtualNetworkListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]VirtualNetwork `json:"value,omitempty"`
+	NextLink          *string           `json:"nextLink,omitempty"`
+}
+
+// VirtualNetworkListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client VirtualNetworkListResult) VirtualNetworkListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
    +
+// VirtualNetworkPeering is a peering in a VirtualNetwork resource.
+type VirtualNetworkPeering struct {
+	autorest.Response `json:"-"`
+	ID                *string                                `json:"id,omitempty"`
+	Properties        *VirtualNetworkPeeringPropertiesFormat `json:"properties,omitempty"`
+	Name              *string                                `json:"name,omitempty"`
+	Etag              *string                                `json:"etag,omitempty"`
+}
+
+// VirtualNetworkPeeringListResult is the response for the API service call
+// that lists all peerings belonging to a virtual network.
+type VirtualNetworkPeeringListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]VirtualNetworkPeering `json:"value,omitempty"`
+	NextLink          *string                  `json:"nextLink,omitempty"`
+}
+
+// VirtualNetworkPeeringListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client VirtualNetworkPeeringListResult) VirtualNetworkPeeringListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// VirtualNetworkPeeringPropertiesFormat is the set of properties of a virtual network peering.
+type VirtualNetworkPeeringPropertiesFormat struct {
+	AllowVirtualNetworkAccess *bool                      `json:"allowVirtualNetworkAccess,omitempty"`
+	AllowForwardedTraffic     *bool                      `json:"allowForwardedTraffic,omitempty"`
+	AllowGatewayTransit       *bool                      `json:"allowGatewayTransit,omitempty"`
+	UseRemoteGateways         *bool                      `json:"useRemoteGateways,omitempty"`
+	RemoteVirtualNetwork      *SubResource               `json:"remoteVirtualNetwork,omitempty"`
+	PeeringState              VirtualNetworkPeeringState `json:"peeringState,omitempty"`
+	ProvisioningState         *string                    `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkPropertiesFormat is the set of properties of a virtual network.
+type VirtualNetworkPropertiesFormat struct {
+	AddressSpace           *AddressSpace            `json:"addressSpace,omitempty"`
+	DhcpOptions            *DhcpOptions             `json:"dhcpOptions,omitempty"`
+	Subnets                *[]Subnet                `json:"subnets,omitempty"`
+	VirtualNetworkPeerings *[]VirtualNetworkPeering `json:"VirtualNetworkPeerings,omitempty"` // NOTE(review): tag is PascalCase unlike sibling camelCase tags — generated code; confirm against the Azure wire format before changing
+	ResourceGUID           *string                  `json:"resourceGuid,omitempty"`
+	ProvisioningState      *string                  `json:"provisioningState,omitempty"`
+}
    +
+// VpnClientConfiguration is the VpnClientConfiguration for P2S (point-to-site) clients.
+type VpnClientConfiguration struct {
+	VpnClientAddressPool         *AddressSpace                  `json:"vpnClientAddressPool,omitempty"`
+	VpnClientRootCertificates    *[]VpnClientRootCertificate    `json:"vpnClientRootCertificates,omitempty"`
+	VpnClientRevokedCertificates *[]VpnClientRevokedCertificate `json:"vpnClientRevokedCertificates,omitempty"`
+}
+
+// VpnClientParameters holds the VPN client parameters.
+type VpnClientParameters struct {
+	ProcessorArchitecture ProcessorArchitecture `json:"ProcessorArchitecture,omitempty"`
+}
+
+// VpnClientRevokedCertificate is a VPN client revoked certificate of a virtual
+// network gateway.
+type VpnClientRevokedCertificate struct {
+	ID         *string                                      `json:"id,omitempty"`
+	Properties *VpnClientRevokedCertificatePropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                      `json:"name,omitempty"`
+	Etag       *string                                      `json:"etag,omitempty"`
+}
+
+// VpnClientRevokedCertificatePropertiesFormat is the set of properties of the
+// revoked VPN client certificate of a virtual network gateway.
+type VpnClientRevokedCertificatePropertiesFormat struct {
+	Thumbprint        *string `json:"thumbprint,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// VpnClientRootCertificate is a VPN client root certificate of a virtual
+// network gateway.
+type VpnClientRootCertificate struct {
+	ID         *string                                   `json:"id,omitempty"`
+	Properties *VpnClientRootCertificatePropertiesFormat `json:"properties,omitempty"`
+	Name       *string                                   `json:"name,omitempty"`
+	Etag       *string                                   `json:"etag,omitempty"`
+}
+
+// VpnClientRootCertificatePropertiesFormat is the set of properties of the VPN
+// client root certificate of a virtual network gateway.
+type VpnClientRootCertificatePropertiesFormat struct {
+	PublicCertData    *string `json:"publicCertData,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go
    new file mode 100644
    index 0000000..40bf7cc
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go
    @@ -0,0 +1,448 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// PublicIPAddressesClient is the Microsoft Azure Network management API
+// client. The API provides a RESTful set of web services that interact with
+// Microsoft Azure Networks service to manage your network resources. The API
+// has entities that capture the relationship between an end user and the
+// Microsoft Azure Networks service.
+type PublicIPAddressesClient struct {
+	ManagementClient
+}
+
+// NewPublicIPAddressesClient creates an instance of the
+// PublicIPAddressesClient client.
+func NewPublicIPAddressesClient(subscriptionID string) PublicIPAddressesClient {
+	return NewPublicIPAddressesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPublicIPAddressesClientWithBaseURI creates an instance of the
+// PublicIPAddressesClient client.
+func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string) PublicIPAddressesClient {
+	return PublicIPAddressesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// CreateOrUpdate runs the Put PublicIPAddress operation, which creates or
+// updates a static/dynamic public IP address. This method may poll for
+// completion. Polling can be canceled by passing the cancel channel argument;
+// the channel will be used to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. publicIPAddressName is
+// the name of the publicIpAddress. parameters is parameters supplied to the
+// create/update PublicIPAddress operation.
+func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (result autorest.Response, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration", Name: validation.Null, Rule: false,
+					Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties", Name: validation.Null, Rule: false,
+						Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties.Subnet", Name: validation.Null, Rule: false,
+							Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties", Name: validation.Null, Rule: false,
+								Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.NetworkSecurityGroup", Name: validation.Null, Rule: false,
+									Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.NetworkSecurityGroup.Properties", Name: validation.Null, Rule: false,
+										Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.NetworkSecurityGroup.Properties.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil},
+											{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.NetworkSecurityGroup.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil},
+										}},
+									}},
+									{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.RouteTable", Name: validation.Null, Rule: false,
+										Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.RouteTable.Properties", Name: validation.Null, Rule: false,
+											Chain: []validation.Constraint{{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.RouteTable.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}},
+										}},
+									{Target: "parameters.Properties.IPConfiguration.Properties.Subnet.Properties.IPConfigurations", Name: validation.ReadOnly, Rule: true, Chain: nil},
+								}},
+							}},
+							{Target: "parameters.Properties.IPConfiguration.Properties.PublicIPAddress", Name: validation.Null, Rule: false, Chain: nil},
+						}},
+					}},
+					{Target: "parameters.Properties.IPConfiguration", Name: validation.ReadOnly, Rule: true, Chain: nil},
+				}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "network.PublicIPAddressesClient", "CreateOrUpdate")
+	}
+
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"publicIpAddressName": autorest.Encode("path", publicIPAddressName),
+		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Delete runs the delete publicIpAddress operation, which deletes the
+// specified publicIpAddress. This method may poll for completion. Polling can
+// be canceled by passing the cancel channel argument; the channel will be
+// used to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. publicIPAddressName is
+// the name of the public IP address.
+func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
+// DeletePreparer prepares the Delete request.
+func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"publicIpAddressName": autorest.Encode("path", publicIPAddressName),
+		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get runs the Get publicIpAddress operation, which retrieves information
+// about the specified publicIpAddress.
+//
+// resourceGroupName is the name of the resource group. publicIPAddressName is
+// the name of the public IP address. expand is expand references resources.
+func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result PublicIPAddress, err error) {
+	req, err := client.GetPreparer(resourceGroupName, publicIPAddressName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
    +
    +// GetPreparer prepares the Get request.
    +func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publicIPAddressName string, expand string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"publicIpAddressName": autorest.Encode("path", publicIPAddressName),
    +		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(expand) > 0 {
    +		queryParameters["$expand"] = autorest.Encode("query", expand)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client PublicIPAddressesClient) GetResponder(resp *http.Response) (result PublicIPAddress, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List publicIpAddress operation retrieves all the publicIpAddresses
    +// in a resource group.
    +//
    +// resourceGroupName is the name of the resource group.
    +func (client PublicIPAddressesClient) List(resourceGroupName string) (result PublicIPAddressListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client PublicIPAddressesClient) ListResponder(resp *http.Response) (result PublicIPAddressListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) {
    +	req, err := lastResults.PublicIPAddressListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// ListAll the List publicIpAddress operation retrieves all the
    +// publicIpAddresses in a subscription.
    +func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, err error) {
    +	req, err := client.ListAllPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListAllSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListAllResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListAllPreparer prepares the ListAll request.
    +func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListAllSender sends the ListAll request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListAllResponder handles the response to the ListAll request. The method always
    +// closes the http.Response Body.
    +func (client PublicIPAddressesClient) ListAllResponder(resp *http.Response) (result PublicIPAddressListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListAllNextResults retrieves the next set of results, if any.
    +func (client PublicIPAddressesClient) ListAllNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) {
    +	req, err := lastResults.PublicIPAddressListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListAllSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListAllResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go
    new file mode 100644
    index 0000000..fd0c681
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go
    @@ -0,0 +1,337 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
     +// RoutesClient is the Microsoft Azure Network management API that provides a
    +// RESTful set of web services that interact with Microsoft Azure Networks
    +// service to manage your network resources. The API has entities that
    +// capture the relationship between an end user and the Microsoft Azure
    +// Networks service.
    +type RoutesClient struct {
    +	ManagementClient
    +}
    +
    +// NewRoutesClient creates an instance of the RoutesClient client.
    +func NewRoutesClient(subscriptionID string) RoutesClient {
    +	return NewRoutesClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewRoutesClientWithBaseURI creates an instance of the RoutesClient client.
    +func NewRoutesClientWithBaseURI(baseURI string, subscriptionID string) RoutesClient {
    +	return RoutesClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put route operation creates/updates a route in the
    +// specified route table This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. routeTableName is the
    +// name of the route table. routeName is the name of the route.
    +// routeParameters is parameters supplied to the create/update route
    +// operation
    +func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, routeName, routeParameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"routeName":         autorest.Encode("path", routeName),
    +		"routeTableName":    autorest.Encode("path", routeTableName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters),
    +		autorest.WithJSON(routeParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete route operation deletes the specified route from a route
    +// table. This method may poll for completion. Polling can be canceled by
    +// passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. routeTableName is the
    +// name of the route table. routeName is the name of the route.
    +func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, routeTableName, routeName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"routeName":         autorest.Encode("path", routeName),
    +		"routeTableName":    autorest.Encode("path", routeTableName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client RoutesClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client RoutesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the Get route operation retrieves information about the specified route
    +// from the route table.
    +//
    +// resourceGroupName is the name of the resource group. routeTableName is the
    +// name of the route table. routeName is the name of the route.
    +func (client RoutesClient) Get(resourceGroupName string, routeTableName string, routeName string) (result Route, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, routeTableName, routeName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client RoutesClient) GetPreparer(resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"routeName":         autorest.Encode("path", routeName),
    +		"routeTableName":    autorest.Encode("path", routeTableName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client RoutesClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client RoutesClient) GetResponder(resp *http.Response) (result Route, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
     +// List the List routes operation retrieves all the routes in a
    +// route table.
    +//
    +// resourceGroupName is the name of the resource group. routeTableName is the
    +// name of the route table.
    +func (client RoutesClient) List(resourceGroupName string, routeTableName string) (result RouteListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName, routeTableName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"routeTableName":    autorest.Encode("path", routeTableName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client RoutesClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client RoutesClient) ListResponder(resp *http.Response) (result RouteListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client RoutesClient) ListNextResults(lastResults RouteListResult) (result RouteListResult, err error) {
    +	req, err := lastResults.RouteListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go
    new file mode 100644
    index 0000000..81e5a0e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go
    @@ -0,0 +1,424 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
     +// RouteTablesClient is the Microsoft Azure Network management API that
    +// provides a RESTful set of web services that interact with Microsoft Azure
    +// Networks service to manage your network resources. The API has entities
    +// that capture the relationship between an end user and the Microsoft Azure
    +// Networks service.
    +type RouteTablesClient struct {
    +	ManagementClient
    +}
    +
    +// NewRouteTablesClient creates an instance of the RouteTablesClient client.
    +func NewRouteTablesClient(subscriptionID string) RouteTablesClient {
    +	return NewRouteTablesClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewRouteTablesClientWithBaseURI creates an instance of the
    +// RouteTablesClient client.
    +func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) RouteTablesClient {
    +	return RouteTablesClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put RouteTable operation creates/updates a route table
    +// in the specified resource group. This method may poll for completion.
    +// Polling can be canceled by passing the cancel channel argument. The
    +// channel will be used to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. routeTableName is the
    +// name of the route table. parameters is parameters supplied to the
    +// create/update Route Table operation
    +func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: parameters,
    +			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
    +				Chain: []validation.Constraint{{Target: "parameters.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "network.RouteTablesClient", "CreateOrUpdate")
    +	}
    +
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"routeTableName":    autorest.Encode("path", routeTableName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
+// Delete deletes the specified route table (the Delete RouteTable operation).
+// This method may poll for completion. Polling can be canceled by passing
+// the cancel channel argument. The channel will be used to cancel polling
+// and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. routeTableName is the
+// name of the route table.
+func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, routeTableName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"routeTableName":    autorest.Encode("path", routeTableName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel}) // Cancel ties the HTTP request to the caller's cancel channel
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client RouteTablesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay)) // long-running delete: poll until a terminal state
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client RouteTablesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), // 200/202/204 are the success codes
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get retrieves information about the specified route table (the Get
+// RouteTables operation).
+//
+// resourceGroupName is the name of the resource group. routeTableName is the
+// name of the route table. expand is expand references resources.
+func (client RouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result RouteTable, err error) {
+	req, err := client.GetPreparer(resourceGroupName, routeTableName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTableName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"routeTableName":    autorest.Encode("path", routeTableName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand) // optional: expand referenced resources
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client RouteTablesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RouteTablesClient) GetResponder(resp *http.Response) (result RouteTable, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List returns all route tables in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client RouteTablesClient) List(resourceGroupName string) (result RouteTableListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client RouteTablesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client RouteTablesClient) ListResponder(resp *http.Response) (result RouteTableListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListNextResults retrieves the next set of results, if any.
+func (client RouteTablesClient) ListNextResults(lastResults RouteTableListResult) (result RouteTableListResult, err error) {
+	req, err := lastResults.RouteTableListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil { // a nil request indicates there is no next page; return the zero result
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListAll returns all route tables in the subscription.
+func (client RouteTablesClient) ListAll() (result RouteTableListResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client RouteTablesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client RouteTablesClient) ListAllResponder(resp *http.Response) (result RouteTableListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListAllNextResults retrieves the next set of results, if any.
+func (client RouteTablesClient) ListAllNextResults(lastResults RouteTableListResult) (result RouteTableListResult, err error) {
+	req, err := lastResults.RouteTableListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing next results request")
+	}
+	if req == nil { // a nil request indicates there is no next page; return the zero result
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go
    new file mode 100644
    index 0000000..187ca8b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go
    @@ -0,0 +1,432 @@
+package network
+
+// Copyright (c) Microsoft and contributors.  All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// SecurityGroupsClient is a client for the Microsoft Azure Network
+// management API, a RESTful set of web services that interact with the
+// Microsoft Azure Networks service to manage your network resources. The
+// API has entities that capture the relationship between an end user and
+// the Microsoft Azure Networks service.
+type SecurityGroupsClient struct {
+	ManagementClient
+}
+
+// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient
+// client.
+func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient {
+	return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSecurityGroupsClientWithBaseURI creates an instance of the
+// SecurityGroupsClient client.
+func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient {
+	return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// CreateOrUpdate creates or updates a network security group in the
+// specified resource group (the Put NetworkSecurityGroup operation). This
+// method may poll for completion. Polling can be canceled by passing the
+// cancel channel argument. The channel will be used to cancel polling and
+// any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// parameters is parameters supplied to the create/update Network Security
+// Group operation
+func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (result autorest.Response, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.Properties.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, // read-only field: must not be set by the caller
+					{Target: "parameters.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, // read-only field: must not be set by the caller
+				}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "network.SecurityGroupsClient", "CreateOrUpdate")
+	}
+
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel}) // Cancel ties the HTTP request to the caller's cancel channel
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay)) // long-running PUT: poll until the operation reaches a terminal state
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), // 201 Created and 200 OK are the success codes
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Delete deletes the specified network security group (the Delete
+// NetworkSecurityGroup operation). This method may poll for completion.
+// Polling can be canceled by passing the cancel channel argument. The
+// channel will be used to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel}) // Cancel ties the HTTP request to the caller's cancel channel
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay)) // long-running delete: poll until a terminal state
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), // 200/202/204 are the success codes
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get retrieves information about the specified network security group (the
+// Get NetworkSecurityGroups operation).
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group. expand
+// is expand references resources.
+func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, err error) {
+	req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand) // optional: expand referenced resources
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) GetResponder(resp *http.Response) (result SecurityGroup, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List returns all network security groups in a resource group (the list
+// NetworkSecurityGroups operation).
+//
+// resourceGroupName is the name of the resource group.
+func (client SecurityGroupsClient) List(resourceGroupName string) (result SecurityGroupListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) ListResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) {
+	req, err := lastResults.SecurityGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil { // a nil request indicates there is no next page; return the zero result
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListAll returns all network security groups in the subscription (the list
+// NetworkSecurityGroups operation).
+func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) ListAllResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client SecurityGroupsClient) ListAllNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) {
+	req, err := lastResults.SecurityGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing next results request")
+	}
+	if req == nil { // a nil request indicates there is no next page; return the zero result
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go
    new file mode 100644
    index 0000000..c5ed15e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go
    @@ -0,0 +1,353 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// SecurityRulesClient is the client for the Microsoft Azure Network
+// management API, which provides a RESTful set of web services that interact
+// with the Microsoft Azure Networks service to manage your network
+// resources. The API has entities that capture the relationship between an
+// end user and the Microsoft Azure Networks service.
    +type SecurityRulesClient struct {
    +	ManagementClient
    +}
    +
    +// NewSecurityRulesClient creates an instance of the SecurityRulesClient
    +// client.
    +func NewSecurityRulesClient(subscriptionID string) SecurityRulesClient {
    +	return NewSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewSecurityRulesClientWithBaseURI creates an instance of the
    +// SecurityRulesClient client.
    +func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) SecurityRulesClient {
    +	return SecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put network security rule operation creates/updates a
+// security rule in the specified network security group. This method may poll
    +// for completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group.
    +// networkSecurityGroupName is the name of the network security group.
    +// securityRuleName is the name of the security rule. securityRuleParameters
    +// is parameters supplied to the create/update network security rule
    +// operation
    +func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: securityRuleParameters,
    +			Constraints: []validation.Constraint{{Target: "securityRuleParameters.Properties", Name: validation.Null, Rule: false,
    +				Chain: []validation.Constraint{{Target: "securityRuleParameters.Properties.SourceAddressPrefix", Name: validation.Null, Rule: true, Chain: nil},
    +					{Target: "securityRuleParameters.Properties.DestinationAddressPrefix", Name: validation.Null, Rule: true, Chain: nil},
    +				}}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "network.SecurityRulesClient", "CreateOrUpdate")
    +	}
    +
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
    +		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
    +		"securityRuleName":         autorest.Encode("path", securityRuleName),
    +		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters),
    +		autorest.WithJSON(securityRuleParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete network security rule operation deletes the specified
    +// network security rule. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group.
    +// networkSecurityGroupName is the name of the network security group.
    +// securityRuleName is the name of the security rule.
    +func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
    +		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
    +		"securityRuleName":         autorest.Encode("path", securityRuleName),
    +		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client SecurityRulesClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client SecurityRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the Get NetworkSecurityRule operation retrieves information about the
    +// specified network security rule.
    +//
    +// resourceGroupName is the name of the resource group.
    +// networkSecurityGroupName is the name of the network security group.
    +// securityRuleName is the name of the security rule.
    +func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, securityRuleName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
    +		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
    +		"securityRuleName":         autorest.Encode("path", securityRuleName),
    +		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client SecurityRulesClient) GetResponder(resp *http.Response) (result SecurityRule, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List network security rule operation retrieves all the security
    +// rules in a network security group.
    +//
    +// resourceGroupName is the name of the resource group.
    +// networkSecurityGroupName is the name of the network security group.
    +func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName, networkSecurityGroupName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client SecurityRulesClient) ListPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
    +		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client SecurityRulesClient) ListResponder(resp *http.Response) (result SecurityRuleListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client SecurityRulesClient) ListNextResults(lastResults SecurityRuleListResult) (result SecurityRuleListResult, err error) {
    +	req, err := lastResults.SecurityRuleListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go
    new file mode 100644
    index 0000000..3e27f48
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go
    @@ -0,0 +1,360 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// SubnetsClient is the client for the Microsoft Azure Network management
+// API, which provides a RESTful set of web services that interact with the
+// Microsoft Azure Networks service to manage your network resources. The API
+// has entities that capture the relationship between an end user and the
+// Microsoft Azure Networks service.
    +type SubnetsClient struct {
    +	ManagementClient
    +}
    +
    +// NewSubnetsClient creates an instance of the SubnetsClient client.
    +func NewSubnetsClient(subscriptionID string) SubnetsClient {
    +	return NewSubnetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewSubnetsClientWithBaseURI creates an instance of the SubnetsClient client.
    +func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsClient {
    +	return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put Subnet operation creates/updates a subnet in the
+// specified virtual network. This method may poll for completion. Polling can
    +// be canceled by passing the cancel channel argument. The channel will be
    +// used to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. subnetName is the name of the subnet.
    +// subnetParameters is parameters supplied to the create/update Subnet
    +// operation
    +func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	if err := validation.Validate([]validation.Validation{
    +		{TargetValue: subnetParameters,
    +			Constraints: []validation.Constraint{{Target: "subnetParameters.Properties", Name: validation.Null, Rule: false,
    +				Chain: []validation.Constraint{{Target: "subnetParameters.Properties.NetworkSecurityGroup", Name: validation.Null, Rule: false,
    +					Chain: []validation.Constraint{{Target: "subnetParameters.Properties.NetworkSecurityGroup.Properties", Name: validation.Null, Rule: false,
    +						Chain: []validation.Constraint{{Target: "subnetParameters.Properties.NetworkSecurityGroup.Properties.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil},
    +							{Target: "subnetParameters.Properties.NetworkSecurityGroup.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil},
    +						}},
    +					}},
    +					{Target: "subnetParameters.Properties.RouteTable", Name: validation.Null, Rule: false,
    +						Chain: []validation.Constraint{{Target: "subnetParameters.Properties.RouteTable.Properties", Name: validation.Null, Rule: false,
    +							Chain: []validation.Constraint{{Target: "subnetParameters.Properties.RouteTable.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}},
    +						}},
    +					{Target: "subnetParameters.Properties.IPConfigurations", Name: validation.ReadOnly, Rule: true, Chain: nil},
    +				}}}}}); err != nil {
    +		return result, validation.NewErrorWithValidationError(err, "network.SubnetsClient", "CreateOrUpdate")
    +	}
    +
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
    +		"subnetName":         autorest.Encode("path", subnetName),
    +		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters),
    +		autorest.WithJSON(subnetParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete subnet operation deletes the specified subnet. This
    +// method may poll for completion. Polling can be canceled by passing the
    +// cancel channel argument. The channel will be used to cancel polling and
    +// any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. subnetName is the name of the subnet.
    +func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
    +		"subnetName":         autorest.Encode("path", subnetName),
    +		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client SubnetsClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client SubnetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the Get subnet operation retrieves information about the specified
    +// subnet.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. subnetName is the name of the subnet.
    +// expand is expand references resources.
+func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result Subnet, err error) {
+	req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request, encoding the path and query parameters.
+func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
+		"subnetName":         autorest.Encode("path", subnetName),
+		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body and unmarshals the JSON payload into a Subnet.
+func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List retrieves all the subnets in a virtual network (first page only; use
+// ListNextResults to fetch any subsequent pages).
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network.
+func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName, virtualNetworkName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request, encoding the path and query parameters.
+func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
+		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body and unmarshals the JSON payload.
+func (client SubnetsClient) ListResponder(resp *http.Response) (result SubnetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListNextResults retrieves the next set of results, if any; when the preparer yields a nil request (no further pages) it returns an empty result and nil error.
+func (client SubnetsClient) ListNextResults(lastResults SubnetListResult) (result SubnetListResult, err error) {
+	req, err := lastResults.SubnetListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go
    new file mode 100644
    index 0000000..8c246fe
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go
    @@ -0,0 +1,136 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"github.com/Azure/go-autorest/autorest/validation"
    +	"net/http"
    +)
    +
+// UsagesClient is a client for the Microsoft Azure Network management API, a
+// RESTful set of web services that interact with the Microsoft Azure Networks
+// service to manage your network resources. The API has entities that
+// capture the relationship between an end user and the Microsoft Azure
+// Networks service.
+type UsagesClient struct {
+	ManagementClient
+}
+
+// NewUsagesClient creates a UsagesClient that targets the default base URI.
+func NewUsagesClient(subscriptionID string) UsagesClient {
+	return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsagesClientWithBaseURI creates a UsagesClient that targets the given base URI.
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
+	return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// List lists network usages for a subscription in the given location.
+//
+// location is the location upon which resource usage is queried; it is validated against the pattern ^[-\w\._]+$ before any request is sent.
+func (client UsagesClient) List(location string) (result UsagesListResult, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: location,
+			Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "network.UsagesClient", "List")
+	}
+
+	req, err := client.ListPreparer(location)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request, encoding the path and query parameters.
+func (client UsagesClient) ListPreparer(location string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"location":       autorest.Encode("path", location),
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body and unmarshals the JSON payload.
+func (client UsagesClient) ListResponder(resp *http.Response) (result UsagesListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// ListNextResults retrieves the next set of results, if any; when the preparer yields a nil request (no further pages) it returns an empty result and nil error.
+func (client UsagesClient) ListNextResults(lastResults UsagesListResult) (result UsagesListResult, err error) {
+	req, err := lastResults.UsagesListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go
    new file mode 100644
    index 0000000..aebb35c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go
    @@ -0,0 +1,43 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"fmt"
    +)
    +
+const (
+	major = "6"
+	minor = "0"
+	patch = "0"
+	// Always begin a "tag" with a dash (as per http://semver.org)
+	tag             = "-beta"
+	semVerFormat    = "%s.%s.%s%s"
+	userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s"
+)
+
+// UserAgent returns the User-Agent header value to send with http.Requests.
+func UserAgent() string {
+	return fmt.Sprintf(userAgentFormat, Version(), "network", "2016-09-01")
+}
+
+// Version returns the semantic version (see http://semver.org), e.g. "6.0.0-beta".
+func Version() string {
+	return fmt.Sprintf(semVerFormat, major, minor, patch, tag)
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go
    new file mode 100644
    index 0000000..a7feb8f
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go
    @@ -0,0 +1,555 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// VirtualNetworkGatewayConnectionsClient is a client for the Microsoft Azure
+// Network management API, a RESTful set of web services that interact with
+// the Microsoft Azure Networks service to manage your network resources. The
+// API has entities that capture the relationship between an end user and the
+// Microsoft Azure Networks service.
+type VirtualNetworkGatewayConnectionsClient struct {
+	ManagementClient
+}
+
+// NewVirtualNetworkGatewayConnectionsClient creates an instance of the
+// VirtualNetworkGatewayConnectionsClient client using the default base URI.
+func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string) VirtualNetworkGatewayConnectionsClient {
+	return NewVirtualNetworkGatewayConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of
+// the VirtualNetworkGatewayConnectionsClient client with the given base URI.
+func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewayConnectionsClient {
+	return VirtualNetworkGatewayConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
    +
+// CreateOrUpdate creates or updates a virtual network gateway connection in
+// the specified resource group through the Network resource provider (an
+// HTTP PUT). This method may poll for completion. Polling can be canceled
+// by passing the cancel channel argument; the channel is used to cancel
+// polling and any outstanding HTTP requests. Success status codes are 200
+// (OK) and 201 (Created).
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the name of the virtual network
+// gateway connection. parameters is parameters supplied to the Begin Create
+// or update Virtual Network Gateway connection operation through Network
+// resource provider.
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request, encoding the path/query parameters and the JSON body.
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":                   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":                      autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Delete deletes the specified virtual network gateway connection through
+// the Network resource provider. This method may poll for completion;
+// polling can be canceled by passing the cancel channel argument, which is
+// used to cancel polling and any outstanding HTTP requests. Success status
+// codes are 200 (OK), 202 (Accepted) and 204 (No Content).
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the name of the virtual network
+// gateway connection.
+func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request, encoding the path and query parameters.
+func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":                   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":                      autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewayConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get retrieves information about the specified virtual network gateway
+// connection through the Network resource provider (an HTTP GET on the
+// connection resource).
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the name of the virtual network
+// gateway connection.
+func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, err error) {
+	req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayConnectionName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request, encoding the path and query parameters.
+func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":                   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":                      autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body and unmarshals the JSON payload.
+func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// GetSharedKey retrieves the shared key of the specified virtual network
+// gateway connection through the Network resource provider (a GET on the
+// connection's sharedkey sub-resource).
+//
+// resourceGroupName is the name of the resource group.
+// connectionSharedKeyName is the virtual network gateway connection shared
+// key name.
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, connectionSharedKeyName string) (result ConnectionSharedKeyResult, err error) {
+	req, err := client.GetSharedKeyPreparer(resourceGroupName, connectionSharedKeyName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSharedKeySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure sending request")
+	}
+
+	result, err = client.GetSharedKeyResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetSharedKeyPreparer prepares the GetSharedKey request, encoding the path and query parameters.
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resourceGroupName string, connectionSharedKeyName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"connectionSharedKeyName": autorest.Encode("path", connectionSharedKeyName),
+		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
+		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{connectionSharedKeyName}/sharedkey", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSharedKeySender sends the GetSharedKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetSharedKeyResponder handles the response to the GetSharedKey request. The method always
+// closes the http.Response Body and unmarshals the JSON payload.
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKeyResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List retrieves all the virtual network gateway connections created in the
+// resource group (first page only; use ListNextResults for subsequent pages).
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request, encoding the path and query parameters.
+func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body and unmarshals the JSON payload.
+func (client VirtualNetworkGatewayConnectionsClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayConnectionListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults VirtualNetworkGatewayConnectionListResult) (result VirtualNetworkGatewayConnectionListResult, err error) {
    +	req, err := lastResults.VirtualNetworkGatewayConnectionListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation
    +// resets the virtual network gateway connection shared key for passed
    +// virtual network gateway connection in the specified resource group through
    +// Network resource provider. This method may poll for completion. Polling
    +// can be canceled by passing the cancel channel argument. The channel will
    +// be used to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualNetworkGatewayConnectionName is the virtual network gateway
    +// connection reset shared key Name. parameters is parameters supplied to the
    +// Begin Reset Virtual Network Gateway connection shared key operation
    +// through Network resource provider.
    +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ResetSharedKeySender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ResetSharedKeyResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ResetSharedKeyPreparer prepares the ResetSharedKey request.
    +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":                   autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":                      autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// ResetSharedKeySender sends the ResetSharedKey request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// ResetSharedKeyResponder handles the response to the ResetSharedKey request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation
    +// sets the virtual network gateway connection shared key for passed virtual
    +// network gateway connection in the specified resource group through Network
    +// resource provider. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualNetworkGatewayConnectionName is the virtual network gateway
    +// connection name. parameters is parameters supplied to the Begin Set
+// Virtual Network Gateway connection Shared key operation through Network
    +// resource provider.
    +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.SetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.SetSharedKeySender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.SetSharedKeyResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// SetSharedKeyPreparer prepares the SetSharedKey request.
    +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":                   autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":                      autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// SetSharedKeySender sends the SetSharedKey request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// SetSharedKeyResponder handles the response to the SetSharedKey request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go
    new file mode 100644
    index 0000000..12e12bc
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go
    @@ -0,0 +1,477 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// VirtualNetworkGatewaysClient is the Microsoft Azure Network management
    +// API provides a RESTful set of web services that interact with Microsoft
    +// Azure Networks service to manage your network resources. The API has
    +// entities that capture the relationship between an end user and the
    +// Microsoft Azure Networks service.
    +type VirtualNetworkGatewaysClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualNetworkGatewaysClient creates an instance of the
    +// VirtualNetworkGatewaysClient client.
    +func NewVirtualNetworkGatewaysClient(subscriptionID string) VirtualNetworkGatewaysClient {
    +	return NewVirtualNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the
    +// VirtualNetworkGatewaysClient client.
    +func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewaysClient {
    +	return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put VirtualNetworkGateway operation creates/updates a
    +// virtual network gateway in the specified resource group through Network
    +// resource provider. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualNetworkGatewayName is the name of the virtual network gateway.
    +// parameters is parameters supplied to the Begin Create or update Virtual
    +// Network Gateway operation through Network resource provider.
    +func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the Delete VirtualNetworkGateway operation deletes the specified
    +// virtual network Gateway through Network resource provider. This method may
    +// poll for completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualNetworkGatewayName is the name of the virtual network gateway.
    +func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Generatevpnclientpackage the Generatevpnclientpackage operation generates
    +// Vpn client package for P2S client of the virtual network gateway in the
    +// specified resource group through Network resource provider.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualNetworkGatewayName is the name of the virtual network gateway.
+// parameters is parameters supplied to the Begin Generating Virtual Network
    +// Gateway Vpn client package operation through Network resource provider.
    +func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (result String, err error) {
    +	req, err := client.GeneratevpnclientpackagePreparer(resourceGroupName, virtualNetworkGatewayName, parameters)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GeneratevpnclientpackageSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GeneratevpnclientpackageResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GeneratevpnclientpackagePreparer prepares the Generatevpnclientpackage request.
    +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GeneratevpnclientpackageSender sends the Generatevpnclientpackage request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GeneratevpnclientpackageResponder handles the response to the Generatevpnclientpackage request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(resp *http.Response) (result String, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
    +		autorest.ByUnmarshallingJSON(&result.Value),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// Get the Get VirtualNetworkGateway operation retrieves information about the
    +// specified virtual network gateway through Network resource provider.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualNetworkGatewayName is the name of the virtual network gateway.
    +func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List VirtualNetworkGateways operation retrieves all the virtual
    +// network gateways stored.
    +//
    +// resourceGroupName is the name of the resource group.
    +func (client VirtualNetworkGatewaysClient) List(resourceGroupName string) (result VirtualNetworkGatewayListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName": autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewaysClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNetworkGatewayListResult) (result VirtualNetworkGatewayListResult, err error) {
    +	req, err := lastResults.VirtualNetworkGatewayListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    +
    +// Reset the Reset VirtualNetworkGateway operation resets the primary of the
    +// virtual network gateway in the specified resource group through Network
    +// resource provider. This method may poll for completion. Polling can be
    +// canceled by passing the cancel channel argument. The channel will be used
    +// to cancel polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group.
    +// virtualNetworkGatewayName is the name of the virtual network gateway.
    +// parameters is parameters supplied to the Begin Reset Virtual Network
    +// Gateway operation through Network resource provider.
    +func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.ResetPreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ResetSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ResetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ResetPreparer prepares the Reset request.
    +func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPost(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// ResetSender sends the Reset request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// ResetResponder handles the response to the Reset request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go
    new file mode 100644
    index 0000000..ba9dc74
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go
    @@ -0,0 +1,342 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// VirtualNetworkPeeringsClient is the Microsoft Azure Network management
    +// API provides a RESTful set of web services that interact with Microsoft
    +// Azure Networks service to manage your network resources. The API has
    +// entities that capture the relationship between an end user and the
    +// Microsoft Azure Networks service.
    +type VirtualNetworkPeeringsClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualNetworkPeeringsClient creates an instance of the
    +// VirtualNetworkPeeringsClient client.
    +func NewVirtualNetworkPeeringsClient(subscriptionID string) VirtualNetworkPeeringsClient {
    +	return NewVirtualNetworkPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualNetworkPeeringsClientWithBaseURI creates an instance of the
    +// VirtualNetworkPeeringsClient client.
    +func NewVirtualNetworkPeeringsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkPeeringsClient {
    +	return VirtualNetworkPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
    +// CreateOrUpdate the Put virtual network peering operation creates/updates a
+// peering in the specified virtual network. This method may poll for
    +// completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. virtualNetworkPeeringName is the name of
    +// the peering. virtualNetworkPeeringParameters is parameters supplied to the
    +// create/update virtual network peering operation
    +func (client VirtualNetworkPeeringsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, virtualNetworkPeeringParameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client VirtualNetworkPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName":        autorest.Encode("path", virtualNetworkName),
    +		"virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters),
    +		autorest.WithJSON(virtualNetworkPeeringParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the delete virtual network peering operation deletes the specified
    +// peering. This method may poll for completion. Polling can be canceled by
    +// passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. virtualNetworkPeeringName is the name of
    +// the virtual network peering.
    +func (client VirtualNetworkPeeringsClient) Delete(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// DeletePreparer prepares the Delete request.
    +func (client VirtualNetworkPeeringsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName":        autorest.Encode("path", virtualNetworkName),
    +		"virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsDelete(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// DeleteSender sends the Delete request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// DeleteResponder handles the response to the Delete request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkPeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Get the Get virtual network peering operation retrieves information about
    +// the specified virtual network peering.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. virtualNetworkPeeringName is the name of
    +// the virtual network peering.
    +func (client VirtualNetworkPeeringsClient) Get(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (result VirtualNetworkPeering, err error) {
    +	req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.GetSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.GetResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// GetPreparer prepares the Get request.
    +func (client VirtualNetworkPeeringsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":         autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName":        autorest.Encode("path", virtualNetworkName),
    +		"virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// GetSender sends the Get request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkPeeringsClient) GetSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// GetResponder handles the response to the Get request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkPeeringsClient) GetResponder(resp *http.Response) (result VirtualNetworkPeering, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// List the List virtual network peerings operation retrieves all the peerings
    +// in a virtual network.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network.
    +func (client VirtualNetworkPeeringsClient) List(resourceGroupName string, virtualNetworkName string) (result VirtualNetworkPeeringListResult, err error) {
    +	req, err := client.ListPreparer(resourceGroupName, virtualNetworkName)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// ListPreparer prepares the List request.
    +func (client VirtualNetworkPeeringsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// ListSender sends the List request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworkPeeringsClient) ListSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// ListResponder handles the response to the List request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworkPeeringsClient) ListResponder(resp *http.Response) (result VirtualNetworkPeeringListResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// ListNextResults retrieves the next set of results, if any.
    +func (client VirtualNetworkPeeringsClient) ListNextResults(lastResults VirtualNetworkPeeringListResult) (result VirtualNetworkPeeringListResult, err error) {
    +	req, err := lastResults.VirtualNetworkPeeringListResultPreparer()
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing next results request")
    +	}
    +	if req == nil {
    +		return
    +	}
    +
    +	resp, err := client.ListSender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending next results request")
    +	}
    +
    +	result, err = client.ListResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure responding to next results request")
    +	}
    +
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go
    new file mode 100644
    index 0000000..8975884
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go
    @@ -0,0 +1,488 @@
    +package network
    +
    +// Copyright (c) Microsoft and contributors.  All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +// http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +//
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +//
    +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
    +// Changes may cause incorrect behavior and will be lost if the code is
    +// regenerated.
    +
    +import (
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/azure"
    +	"net/http"
    +)
    +
+// VirtualNetworksClient is the Microsoft Azure Network management API
    +// provides a RESTful set of web services that interact with Microsoft Azure
    +// Networks service to manage your network resources. The API has entities
    +// that capture the relationship between an end user and the Microsoft Azure
    +// Networks service.
    +type VirtualNetworksClient struct {
    +	ManagementClient
    +}
    +
    +// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient
    +// client.
    +func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient {
    +	return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID)
    +}
    +
    +// NewVirtualNetworksClientWithBaseURI creates an instance of the
    +// VirtualNetworksClient client.
    +func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient {
    +	return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)}
    +}
    +
+// CheckIPAddressAvailability checks whether a private IP address is available
    +// for use.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. ipAddress is the private IP address to be
    +// verified.
    +func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName string, virtualNetworkName string, ipAddress string) (result IPAddressAvailabilityResult, err error) {
    +	req, err := client.CheckIPAddressAvailabilityPreparer(resourceGroupName, virtualNetworkName, ipAddress)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CheckIPAddressAvailabilitySender(req)
    +	if err != nil {
    +		result.Response = autorest.Response{Response: resp}
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CheckIPAddressAvailabilityResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CheckIPAddressAvailabilityPreparer prepares the CheckIPAddressAvailability request.
    +func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceGroupName string, virtualNetworkName string, ipAddress string) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +	if len(ipAddress) > 0 {
    +		queryParameters["ipAddress"] = autorest.Encode("query", ipAddress)
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsGet(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability", pathParameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{})
    +}
    +
    +// CheckIPAddressAvailabilitySender sends the CheckIPAddressAvailability request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworksClient) CheckIPAddressAvailabilitySender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client, req)
    +}
    +
    +// CheckIPAddressAvailabilityResponder handles the response to the CheckIPAddressAvailability request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworksClient) CheckIPAddressAvailabilityResponder(resp *http.Response) (result IPAddressAvailabilityResult, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK),
    +		autorest.ByUnmarshallingJSON(&result),
    +		autorest.ByClosing())
    +	result.Response = autorest.Response{Response: resp}
    +	return
    +}
    +
    +// CreateOrUpdate the Put VirtualNetwork operation creates/updates a virtual
    +// network in the specified resource group. This method may poll for
    +// completion. Polling can be canceled by passing the cancel channel
    +// argument. The channel will be used to cancel polling and any outstanding
    +// HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network. parameters is parameters supplied to the
    +// create/update Virtual Network operation
    +func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.CreateOrUpdateSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.CreateOrUpdateResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
    +// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
    +func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (*http.Request, error) {
    +	pathParameters := map[string]interface{}{
    +		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
    +		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
    +		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
    +	}
    +
    +	queryParameters := map[string]interface{}{
    +		"api-version": client.APIVersion,
    +	}
    +
    +	preparer := autorest.CreatePreparer(
    +		autorest.AsJSON(),
    +		autorest.AsPut(),
    +		autorest.WithBaseURL(client.BaseURI),
    +		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters),
    +		autorest.WithJSON(parameters),
    +		autorest.WithQueryParameters(queryParameters))
    +	return preparer.Prepare(&http.Request{Cancel: cancel})
    +}
    +
    +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
    +// http.Response Body if it receives an error.
    +func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
    +	return autorest.SendWithSender(client,
    +		req,
    +		azure.DoPollForAsynchronous(client.PollingDelay))
    +}
    +
    +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
    +// closes the http.Response Body.
    +func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
    +	err = autorest.Respond(
    +		resp,
    +		client.ByInspecting(),
    +		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
    +		autorest.ByClosing())
    +	result.Response = resp
    +	return
    +}
    +
    +// Delete the Delete VirtualNetwork operation deletes the specified virtual
+// network. This method may poll for completion. Polling can be canceled by
    +// passing the cancel channel argument. The channel will be used to cancel
    +// polling and any outstanding HTTP requests.
    +//
    +// resourceGroupName is the name of the resource group. virtualNetworkName is
    +// the name of the virtual network.
    +func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (result autorest.Response, err error) {
    +	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, cancel)
    +	if err != nil {
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure preparing request")
    +	}
    +
    +	resp, err := client.DeleteSender(req)
    +	if err != nil {
    +		result.Response = resp
    +		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure sending request")
    +	}
    +
    +	result, err = client.DeleteResponder(resp)
    +	if err != nil {
    +		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure responding to request")
    +	}
    +
    +	return
    +}
    +
+// DeletePreparer prepares the Delete request.
+//
+// The request targets the virtualNetworks/{virtualNetworkName} resource; the
+// supplied cancel channel is attached via the http.Request.Cancel field so
+// callers can abort the long-running delete while it is being polled.
+func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
+		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+//
+// DoPollForAsynchronous makes this call block until the Azure long-running
+// delete reaches a terminal state (or polling is canceled).
+func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+//
+// 202/204/200 are all accepted because the delete completes asynchronously.
+func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
    +
+// Get the Get VirtualNetwork operation retrieves information about the
+// specified virtual network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network. expand is expand references resources.
+func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, err error) {
+	req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		// Wrap the responder error for context; result still carries the raw
+		// http.Response assigned by GetResponder.
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtualNetworkName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  autorest.Encode("path", resourceGroupName),
+		"subscriptionId":     autorest.Encode("path", client.SubscriptionID),
+		"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	// $expand is optional and only sent when a non-empty value was supplied.
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+//
+// Only 200 is accepted; the JSON body is unmarshalled into the result.
+func (client VirtualNetworksClient) GetResponder(resp *http.Response) (result VirtualNetwork, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
    +
+// List returns all Virtual Networks in a resource group (a single page;
+// use ListNextResults to retrieve subsequent pages).
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) ListResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+//
+// A nil request together with a nil error means lastResults produced no
+// follow-up request (no further pages); the zero-value result is returned.
+func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) {
+	req, err := lastResults.VirtualNetworkListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    +
+// ListAll returns all Virtual Networks in a subscription (a single page;
+// use ListAllNextResults to retrieve subsequent pages).
+func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+//
+// Unlike ListPreparer, the path is subscription-scoped (no resource group).
+func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) ListAllResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+//
+// A nil request together with a nil error means lastResults produced no
+// follow-up request (no further pages); the zero-value result is returned.
+func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) {
+	req, err := lastResults.VirtualNetworkListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to next results request")
+	}
+
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/LICENSE b/src/prometheus/vendor/github.com/Azure/go-autorest/LICENSE
    new file mode 100644
    index 0000000..b9d6a27
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/LICENSE
    @@ -0,0 +1,191 @@
    +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   Copyright 2015 Microsoft Corporation
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/autorest.go
    new file mode 100644
    index 0000000..9804f40
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/autorest.go
    @@ -0,0 +1,114 @@
    +/*
    +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
    +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
    +generated Go code.
    +
    +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
    +and Responding. A typical pattern is:
    +
    +  req, err := Prepare(&http.Request{},
    +    token.WithAuthorization())
    +
    +  resp, err := Send(req,
    +    WithLogging(logger),
    +    DoErrorIfStatusCode(http.StatusInternalServerError),
    +    DoCloseIfError(),
    +    DoRetryForAttempts(5, time.Second))
    +
    +  err = Respond(resp,
    +    ByClosing())
    +
    +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
    +and then pass the data along, pass the data first and then modify the result, or wrap themselves
    +around passing the data (such as a logger might do). Decorators run in the order provided. For
    +example, the following:
    +
    +  req, err := Prepare(&http.Request{},
    +    WithBaseURL("https://microsoft.com/"),
    +    WithPath("a"),
    +    WithPath("b"),
    +    WithPath("c"))
    +
    +will set the URL to:
    +
    +  https://microsoft.com/a/b/c
    +
    +Preparers and Responders may be shared and re-used (assuming the underlying decorators support
    +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
    +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
    +all bound together by means of input / output channels.
    +
    +Decorators hold their passed state within a closure (such as the path components in the example
    +above). Be careful to share Preparers and Responders only in a context where such held state
    +applies. For example, it may not make sense to share a Preparer that applies a query string from a
    +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., ByUnmarshallingJSON) is likely incorrect.
    +
    +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
    +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
    +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
    +correct parsing and formatting.
    +
    +Errors raised by autorest objects and methods will conform to the autorest.Error interface.
    +
    +See the included examples for more detail. For details on the suggested use of this package by
    +generated clients, see the Client described below.
    +*/
    +package autorest
    +
    +import (
    +	"net/http"
    +	"time"
    +)
    +
+const (
+	// HeaderLocation specifies the HTTP Location header.
+	HeaderLocation = "Location"
+
+	// HeaderRetryAfter specifies the HTTP Retry-After header.
+	HeaderRetryAfter = "Retry-After"
+)
+
+// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
+// and false otherwise.
+func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
+	return containsInt(codes, resp.StatusCode)
+}
+
+// GetLocation retrieves the URL from the Location header of the passed response.
+// It returns "" when the header is absent.
+func GetLocation(resp *http.Response) string {
+	return resp.Header.Get(HeaderLocation)
+}
+
+// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
+// the header is absent or is malformed, it will return the supplied default delay time.Duration.
+//
+// Only the delta-seconds form of Retry-After is handled (the value is parsed
+// by appending "s"); the HTTP-date form fails to parse and yields defaultDelay.
+func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
+	retry := resp.Header.Get(HeaderRetryAfter)
+	if retry == "" {
+		return defaultDelay
+	}
+
+	d, err := time.ParseDuration(retry + "s")
+	if err != nil {
+		return defaultDelay
+	}
+
+	return d
+}
+
+// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
+//
+// The request is a GET against the response's Location header; an error is
+// returned when that header is missing. The cancel channel is attached via
+// http.Request.Cancel so callers can abort polling.
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
+	location := GetLocation(resp)
+	if location == "" {
+		return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
+	}
+
+	req, err := Prepare(&http.Request{Cancel: cancel},
+		AsGet(),
+		WithBaseURL(location))
+	if err != nil {
+		return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
+	}
+
+	return req, nil
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
    new file mode 100644
    index 0000000..280d32a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
    @@ -0,0 +1,307 @@
    +package azure
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/Azure/go-autorest/autorest/date"
    +	"io/ioutil"
    +	"net/http"
    +	"strings"
    +	"time"
    +)
    +
+const (
+	// headerAsyncOperation is the response header Azure uses to publish the
+	// URL to poll for a long-running operation.
+	headerAsyncOperation = "Azure-AsyncOperation"
+)
+
+const (
+	// HTTP methods used when building and classifying polling requests.
+	methodDelete = "DELETE"
+	methodPatch  = "PATCH"
+	methodPost   = "POST"
+	methodPut    = "PUT"
+	methodGet    = "GET"
+
+	// Operation status values reported by the service while polling.
+	operationInProgress string = "InProgress"
+	operationCanceled   string = "Canceled"
+	operationFailed     string = "Failed"
+	operationSucceeded  string = "Succeeded"
+)
    +
+// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
+// long-running operation. It will delay between requests for the duration specified in the
+// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
+// closing the optional channel on the http.Request.
+func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
+	return func(s autorest.Sender) autorest.Sender {
+		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			resp, err = s.Do(r)
+			if err != nil {
+				return resp, err
+			}
+			// Only 202/201/200 can indicate an in-flight long-running
+			// operation; any other response is returned to the caller as-is.
+			pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK}
+			if !autorest.ResponseHasStatusCode(resp, pollingCodes...) {
+				return resp, nil
+			}
+
+			ps := pollingState{}
+			for err == nil {
+				err = updatePollingState(resp, &ps)
+				if err != nil {
+					break
+				}
+				if ps.hasTerminated() {
+					// pollingState implements error; a terminal non-success
+					// state is surfaced to the caller as the error itself.
+					if !ps.hasSucceeded() {
+						err = ps
+					}
+					break
+				}
+
+				r, err = newPollingRequest(resp, ps)
+				if err != nil {
+					return resp, err
+				}
+
+				// Prefer the service-provided Retry-After interval; carry it
+				// forward as the default for subsequent iterations.
+				delay = autorest.GetRetryAfter(resp, delay)
+				resp, err = autorest.SendWithSender(s, r,
+					autorest.AfterDelay(delay))
+			}
+
+			return resp, err
+		})
+	}
+}
    +
+// getAsyncOperation returns the value of the Azure-AsyncOperation header,
+// or "" when the response carries no such header.
+func getAsyncOperation(resp *http.Response) string {
+	return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+}
+
+// hasSucceeded reports whether state is the terminal "Succeeded" status.
+func hasSucceeded(state string) bool {
+	return state == operationSucceeded
+}
+
+// hasTerminated reports whether state is any terminal status
+// (Canceled, Failed, or Succeeded).
+func hasTerminated(state string) bool {
+	switch state {
+	case operationCanceled, operationFailed, operationSucceeded:
+		return true
+	default:
+		return false
+	}
+}
+
+// hasFailed reports whether state is the terminal "Failed" status.
+func hasFailed(state string) bool {
+	return state == operationFailed
+}
+
+// provisioningTracker abstracts the two polling payload shapes
+// (operationResource and provisioningStatus) behind a common status view.
+type provisioningTracker interface {
+	state() string
+	hasSucceeded() bool
+	hasTerminated() bool
+}
    +
+// operationResource models the OperationResource polling document (the
+// usesOperationResponse format) returned while tracking an Azure
+// long-running operation.
+type operationResource struct {
+	// Note:
+	// 	The specification states services should return the "id" field. However some return it as
+	// 	"operationId".
+	ID              string                 `json:"id"`
+	OperationID     string                 `json:"operationId"`
+	Name            string                 `json:"name"`
+	Status          string                 `json:"status"`
+	Properties      map[string]interface{} `json:"properties"`
+	OperationError  ServiceError           `json:"error"`
+	StartTime       date.Time              `json:"startTime"`
+	EndTime         date.Time              `json:"endTime"`
+	PercentComplete float64                `json:"percentComplete"`
+}
+
+// state returns the raw operation status; together with hasSucceeded and
+// hasTerminated it satisfies the provisioningTracker interface.
+func (or operationResource) state() string {
+	return or.Status
+}
+
+func (or operationResource) hasSucceeded() bool {
+	return hasSucceeded(or.state())
+}
+
+func (or operationResource) hasTerminated() bool {
+	return hasTerminated(or.state())
+}
    +
+// provisioningProperties carries the provisioningState field of a resource
+// body (the usesProvisioningStatus polling format).
+type provisioningProperties struct {
+	ProvisioningState string `json:"provisioningState"`
+}
+
+// provisioningStatus models a resource body polled for its provisioning
+// state; it satisfies the provisioningTracker interface.
+type provisioningStatus struct {
+	Properties        provisioningProperties `json:"properties,omitempty"`
+	ProvisioningError ServiceError           `json:"error,omitempty"`
+}
+
+func (ps provisioningStatus) state() string {
+	return ps.Properties.ProvisioningState
+}
+
+func (ps provisioningStatus) hasSucceeded() bool {
+	return hasSucceeded(ps.state())
+}
+
+func (ps provisioningStatus) hasTerminated() bool {
+	return hasTerminated(ps.state())
+}
+
+// hasProvisioningError reports whether the body carried an error document,
+// by comparing against the zero ServiceError value.
+func (ps provisioningStatus) hasProvisioningError() bool {
+	return ps.ProvisioningError != ServiceError{}
+}
    +
+// pollingResponseFormat identifies which JSON shape the polling endpoint is
+// expected to return for a given long-running operation.
+type pollingResponseFormat string
+
+const (
+	// usesOperationResponse: poll the Azure-AsyncOperation URI, which returns
+	// an operationResource body.
+	usesOperationResponse  pollingResponseFormat = "OperationResponse"
+	// usesProvisioningStatus: poll the Location header (or original URI),
+	// which returns a provisioningStatus body.
+	usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
+	// formatIsUnknown: no response has been inspected yet (initial state).
+	formatIsUnknown        pollingResponseFormat = ""
+)
+
+// pollingState accumulates everything needed to continue (or conclude)
+// polling a long-running operation across successive responses.
+type pollingState struct {
+	responseFormat pollingResponseFormat
+	uri            string // URI to poll next; empty once no polling is required
+	state          string // last well-known operation status observed
+	code           string // service error code, populated only on failure
+	message        string // service error message, populated only on failure
+}
    +
+// hasSucceeded reports whether the tracked operation ended in Succeeded.
+func (ps pollingState) hasSucceeded() bool {
+	return hasSucceeded(ps.state)
+}
+
+// hasTerminated reports whether the tracked operation reached any terminal
+// status (Canceled, Failed, or Succeeded).
+func (ps pollingState) hasTerminated() bool {
+	return hasTerminated(ps.state)
+}
+
+// hasFailed reports whether the tracked operation ended in Failed.
+func (ps pollingState) hasFailed() bool {
+	return hasFailed(ps.state)
+}
+
+// Error formats the terminal status together with the service-supplied error
+// code and message; pollingState therefore satisfies the error interface.
+func (ps pollingState) Error() string {
+	return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
+}
    +
+// updatePollingState maps the operation status -- retrieved from either a
+// provisioningState field, the status field of an OperationResource, or
+// inferred from the HTTP status code -- into a well-known state. Since the
+// process begins from the initial request, the state always comes from either
+// the provisioningState returned or is inferred from the HTTP status code.
+// Subsequent requests will read an Azure OperationResource object if the
+// service initially returned the Azure-AsyncOperation header. The
+// responseFormat field notes the expected response format.
+//
+// On return, resp.Body has been consumed and replaced with an in-memory
+// copy so callers may still read it when no further polling is necessary.
+func updatePollingState(resp *http.Response, ps *pollingState) error {
+	// Determine the response shape
+	// -- The first response will always be a provisioningStatus response; only the polling requests,
+	//    depending on the header returned, may be something otherwise.
+	var pt provisioningTracker
+	if ps.responseFormat == usesOperationResponse {
+		pt = &operationResource{}
+	} else {
+		pt = &provisioningStatus{}
+	}
+
+	// If this is the first request (that is, the polling response shape is unknown), determine how
+	// to poll and what to expect
+	if ps.responseFormat == formatIsUnknown {
+		req := resp.Request
+		if req == nil {
+			return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
+		}
+
+		// Prefer the Azure-AsyncOperation header
+		ps.uri = getAsyncOperation(resp)
+		if ps.uri != "" {
+			ps.responseFormat = usesOperationResponse
+		} else {
+			ps.responseFormat = usesProvisioningStatus
+		}
+
+		// Else, use the Location header
+		if ps.uri == "" {
+			ps.uri = autorest.GetLocation(resp)
+		}
+
+		// Lastly, requests against an existing resource, use the last request URI
+		if ps.uri == "" {
+			m := strings.ToUpper(req.Method)
+			if m == methodPatch || m == methodPut || m == methodGet {
+				ps.uri = req.URL.String()
+			}
+		}
+	}
+
+	// Read and interpret the response (saving the Body in case no polling is necessary)
+	b := &bytes.Buffer{}
+	err := autorest.Respond(resp,
+		autorest.ByCopying(b),
+		autorest.ByUnmarshallingJSON(pt),
+		autorest.ByClosing())
+	resp.Body = ioutil.NopCloser(b)
+	if err != nil {
+		return err
+	}
+
+	// Interpret the results
+	// -- Terminal states apply regardless
+	// -- Unknown states are per-service inprogress states
+	// -- Otherwise, infer state from HTTP status code
+	if pt.hasTerminated() {
+		ps.state = pt.state()
+	} else if pt.state() != "" {
+		ps.state = operationInProgress
+	} else {
+		switch resp.StatusCode {
+		case http.StatusAccepted:
+			ps.state = operationInProgress
+
+		case http.StatusNoContent, http.StatusCreated, http.StatusOK:
+			ps.state = operationSucceeded
+
+		default:
+			ps.state = operationFailed
+		}
+	}
+
+	// Still in progress but nowhere to poll: the service broke the contract.
+	if ps.state == operationInProgress && ps.uri == "" {
+		return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
+	}
+
+	// For failed operation, check for error code and message in
+	// -- Operation resource
+	// -- Response
+	// -- Otherwise, Unknown
+	if ps.hasFailed() {
+		if ps.responseFormat == usesOperationResponse {
+			or := pt.(*operationResource)
+			ps.code = or.OperationError.Code
+			ps.message = or.OperationError.Message
+		} else {
+			p := pt.(*provisioningStatus)
+			if p.hasProvisioningError() {
+				ps.code = p.ProvisioningError.Code
+				ps.message = p.ProvisioningError.Message
+			} else {
+				ps.code = "Unknown"
+				ps.message = "None"
+			}
+		}
+	}
+	return nil
+}
    +
+// newPollingRequest builds the GET request used to poll ps.uri for the status
+// of a long-running operation, propagating the original request's Cancel
+// channel so callers can abort the poll. It fails if the original request is
+// missing from resp or the poll URI cannot be prepared.
+func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
+	req := resp.Request
+	if req == nil {
+		return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
+	}
+
+	reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel},
+		autorest.AsGet(),
+		autorest.WithBaseURL(ps.uri))
+	if err != nil {
+		return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri)
+	}
+
+	return reqPoll, nil
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
    new file mode 100644
    index 0000000..3f4d134
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
    @@ -0,0 +1,180 @@
    +/*
    +Package azure provides Azure-specific implementations used with AutoRest.
    +
    +See the included examples for more detail.
    +*/
    +package azure
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"io/ioutil"
    +	"net/http"
    +	"strconv"
    +
    +	"github.com/Azure/go-autorest/autorest"
    +)
    +
    +const (
    +	// HeaderClientID is the Azure extension header to set a user-specified request ID.
    +	HeaderClientID = "x-ms-client-request-id"
    +
    +	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
    +	// should be included in the response.
    +	HeaderReturnClientID = "x-ms-return-client-request-id"
    +
    +	// HeaderRequestID is the Azure extension header of the service generated request ID returned
    +	// in the response.
    +	HeaderRequestID = "x-ms-request-id"
    +)
    +
+// ServiceError encapsulates the error response from an Azure service.
+type ServiceError struct {
+	Code    string         `json:"code"`
+	Message string         `json:"message"`
+	Details *[]interface{} `json:"details"`
+}
+
+// Error renders the code, message, and (when present) details as a single
+// string; details are JSON-encoded, falling back to %v if marshaling fails.
+func (se ServiceError) Error() string {
+	if se.Details != nil {
+		d, err := json.Marshal(*(se.Details))
+		if err != nil {
+			return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details)
+		}
+		return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d))
+	}
+	return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+}
    +
+// RequestError describes an error response returned by Azure service.
+// It embeds autorest.DetailedError and adds the parsed service error plus
+// the server-generated request ID for correlation.
+type RequestError struct {
+	autorest.DetailedError
+
+	// The error returned by the Azure service.
+	ServiceError *ServiceError `json:"error"`
+
+	// The request id (from the x-ms-request-id-header) of the request.
+	RequestID string
+}
+
+// Error returns a human-friendly error message from service error.
+func (e RequestError) Error() string {
+	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+		e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+// Note it matches only the pointer form *RequestError, which is what
+// WithErrorUnlessStatusCode returns.
+func IsAzureError(e error) bool {
+	_, ok := e.(*RequestError)
+	return ok
+}
    +
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+//
+// If original is already a *RequestError it is returned unchanged (by value)
+// rather than being re-wrapped.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
+	if v, ok := original.(*RequestError); ok {
+		return *v
+	}
+
+	statusCode := autorest.UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+	return RequestError{
+		DetailedError: autorest.DetailedError{
+			Original:    original,
+			PackageType: packageType,
+			Method:      method,
+			StatusCode:  statusCode,
+			Message:     fmt.Sprintf(message, args...),
+		},
+	}
+}
    +
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that UUID accompanies the http.Response.
+func WithReturningClientID(uuid string) autorest.PrepareDecorator {
+	// Compose the two header decorators once; the returned decorator applies
+	// the wrapped preparer first, then these headers.
+	preparer := autorest.CreatePreparer(
+		WithClientID(uuid),
+		WithReturnClientID(true))
+
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			return preparer.Prepare(r)
+		})
+	}
+}
+
+// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
+func WithClientID(uuid string) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderClientID, uuid)
+}
+
+// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-return-client-request-id whose boolean value indicates if the value of the
+// x-ms-client-request-id header should be included in the http.Response.
+func WithReturnClientID(b bool) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
+}
    +
+// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
+// http.Request sent to the service (and returned in the http.Response)
+func ExtractClientID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderClientID, resp)
+}
+
+// ExtractRequestID extracts the Azure server generated request identifier from the
+// x-ms-request-id header.
+func ExtractRequestID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderRequestID, resp)
+}
    +
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
+// azure.RequestError by reading the response body unless the response HTTP status code
+// is among the set passed.
+//
+// If there is a chance service may return responses other than the Azure error
+// format and the response cannot be parsed into an error, a decoding error will
+// be returned containing the response body. In any case, the Responder will
+// return an error if the status code is not satisfied.
+//
+// If this Responder returns an error, the response body will be replaced with
+// an in-memory reader, which needs no further closing.
+func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
+	return func(r autorest.Responder) autorest.Responder {
+		return autorest.ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
+				var e RequestError
+				defer resp.Body.Close()
+
+				// Copy and replace the Body in case it does not contain an error object.
+				// This will leave the Body available to the caller.
+				b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
+				resp.Body = ioutil.NopCloser(&b)
+				if decodeErr != nil {
+					return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
+				} else if e.ServiceError == nil {
+					// Body parsed but carried no "error" object; synthesize one.
+					e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"}
+				}
+
+				e.RequestID = ExtractRequestID(resp)
+				// NOTE(review): the nil comparison implies DetailedError.StatusCode
+				// is an interface-typed field — confirm against autorest.DetailedError.
+				if e.StatusCode == nil {
+					e.StatusCode = resp.StatusCode
+				}
+				err = &e
+			}
+			return err
+		})
+	}
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/config.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/config.go
    new file mode 100644
    index 0000000..bea30b0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/config.go
    @@ -0,0 +1,13 @@
    +package azure
    +
    +import (
    +	"net/url"
    +)
    +
+// OAuthConfig represents the endpoints needed
+// in OAuth operations: interactive authorization, token exchange,
+// and device-code issuance.
+type OAuthConfig struct {
+	AuthorizeEndpoint  url.URL
+	TokenEndpoint      url.URL
+	DeviceCodeEndpoint url.URL
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go
    new file mode 100644
    index 0000000..e1d5498
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go
    @@ -0,0 +1,193 @@
    +package azure
    +
    +/*
    +  This file is largely based on rjw57/oauth2device's code, with the follow differences:
    +   * scope -> resource, and only allow a single one
    +   * receive "Message" in the DeviceCode struct and show it to users as the prompt
    +   * azure-xplat-cli has the following behavior that this emulates:
    +     - does not send client_secret during the token exchange
    +     - sends resource again in the token exchange request
    +*/
    +
    +import (
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +	"time"
    +
    +	"github.com/Azure/go-autorest/autorest"
    +)
    +
    +const (
    +	logPrefix = "autorest/azure/devicetoken:"
    +)
    +
    +var (
    +	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
    +	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
    +
    +	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
    +	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
    +
    +	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
    +	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
    +
    +	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
    +	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
    +
    +	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
    +	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
    +
    +	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
    +	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
    +	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
    +	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
    +)
    +
+// DeviceCode is the object returned by the device auth endpoint
+// It contains information to instruct the user to complete the auth flow
+// (fields below are pointers because the service may omit them).
+type DeviceCode struct {
+	DeviceCode      *string `json:"device_code,omitempty"`
+	UserCode        *string `json:"user_code,omitempty"`
+	VerificationURL *string `json:"verification_url,omitempty"`
+	ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+	Interval        *int64  `json:"interval,string,omitempty"`
+
+	Message     *string `json:"message"` // Azure specific
+	Resource    string  // store the following, stored when initiating, used when exchanging
+	OAuthConfig OAuthConfig
+	ClientID    string
+}
    +
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+	Error            *string `json:"error,omitempty"`
+	ErrorCodes       []int   `json:"error_codes,omitempty"`
+	ErrorDescription *string `json:"error_description,omitempty"`
+	Timestamp        *string `json:"timestamp,omitempty"`
+	TraceID          *string `json:"trace_id,omitempty"`
+}
+
+// deviceToken is the object returned by the token exchange endpoint.
+// It can either look like a Token or an ErrorToken, so put both here
+// and check for presence of "Error" to know if we are in error state
+type deviceToken struct {
+	Token
+	TokenError
+}
    +
+// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+// The clientID, resource, and oauthConfig are stashed on the returned
+// DeviceCode so the subsequent token-exchange calls need no extra context.
+func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+	// Preparation of a static form POST cannot fail here, so the error is ignored.
+	req, _ := autorest.Prepare(
+		&http.Request{},
+		autorest.AsPost(),
+		autorest.AsFormURLEncoded(),
+		autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()),
+		autorest.WithFormData(url.Values{
+			"client_id": []string{clientID},
+			"resource":  []string{resource},
+		}),
+	)
+
+	resp, err := autorest.SendWithSender(client, req)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err)
+	}
+
+	var code DeviceCode
+	err = autorest.Respond(
+		resp,
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&code),
+		autorest.ByClosing())
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err)
+	}
+
+	code.ClientID = clientID
+	code.Resource = resource
+	code.OAuthConfig = oauthConfig
+
+	return &code, nil
+}
    +
+// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed.
+// It returns one of the ErrDevice* sentinel errors for known OAuth error
+// strings, ErrDeviceGeneric for anything unrecognized, or the granted Token.
+//
+// NOTE(review): code.DeviceCode is dereferenced without a nil check — assumes
+// code was produced by InitiateDeviceAuth with a populated device_code.
+func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
+	req, _ := autorest.Prepare(
+		&http.Request{},
+		autorest.AsPost(),
+		autorest.AsFormURLEncoded(),
+		autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()),
+		autorest.WithFormData(url.Values{
+			"client_id":  []string{code.ClientID},
+			"code":       []string{*code.DeviceCode},
+			"grant_type": []string{OAuthGrantTypeDeviceCode},
+			"resource":   []string{code.Resource},
+		}),
+	)
+
+	resp, err := autorest.SendWithSender(client, req)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err)
+	}
+
+	// StatusBadRequest is accepted because OAuth error responses (e.g.
+	// authorization_pending) arrive with a 400 and a parseable body.
+	var token deviceToken
+	err = autorest.Respond(
+		resp,
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest),
+		autorest.ByUnmarshallingJSON(&token),
+		autorest.ByClosing())
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err)
+	}
+
+	// No "error" field in the body means the token was granted.
+	if token.Error == nil {
+		return &token.Token, nil
+	}
+
+	switch *token.Error {
+	case "authorization_pending":
+		return nil, ErrDeviceAuthorizationPending
+	case "slow_down":
+		return nil, ErrDeviceSlowDown
+	case "access_denied":
+		return nil, ErrDeviceAccessDenied
+	case "code_expired":
+		return nil, ErrDeviceCodeExpired
+	default:
+		return nil, ErrDeviceGeneric
+	}
+}
    +
+// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
+// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+// The wait doubles on each slow_down response and the loop aborts once it
+// exceeds three times the server-suggested interval.
+//
+// NOTE(review): code.Interval is dereferenced without a nil check — assumes
+// code was produced by InitiateDeviceAuth with a populated interval.
+func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
+	intervalDuration := time.Duration(*code.Interval) * time.Second
+	waitDuration := intervalDuration
+
+	for {
+		token, err := CheckForUserCompletion(client, code)
+
+		if err == nil {
+			return token, nil
+		}
+
+		switch err {
+		case ErrDeviceSlowDown:
+			waitDuration += waitDuration // back off: double the wait
+		case ErrDeviceAuthorizationPending:
+			// noop
+		default: // everything else is "fatal" to us
+			return nil, err
+		}
+
+		if waitDuration > (intervalDuration * 3) {
+			return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
+		}
+
+		time.Sleep(waitDuration)
+	}
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
    new file mode 100644
    index 0000000..c9d9c46
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
    @@ -0,0 +1,157 @@
    +package azure
    +
    +import (
    +	"fmt"
    +	"net/url"
    +	"strings"
    +)
    +
+const (
+	// activeDirectoryAPIVersion is the api-version appended to the AAD
+	// OAuth endpoint URLs built by OAuthConfigForTenant.
+	activeDirectoryAPIVersion = "1.0"
+)
+
+// environments maps upper-cased common cloud names to their Environment;
+// used by EnvironmentFromName for case-insensitive lookup.
+var environments = map[string]Environment{
+	"AZURECHINACLOUD":        ChinaCloud,
+	"AZUREGERMANCLOUD":       GermanCloud,
+	"AZUREPUBLICCLOUD":       PublicCloud,
+	"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
+}
+
+// Environment represents a set of endpoints for each of Azure's Clouds.
+type Environment struct {
+	Name                         string `json:"name"`
+	ManagementPortalURL          string `json:"managementPortalURL"`
+	PublishSettingsURL           string `json:"publishSettingsURL"`
+	ServiceManagementEndpoint    string `json:"serviceManagementEndpoint"`
+	ResourceManagerEndpoint      string `json:"resourceManagerEndpoint"`
+	ActiveDirectoryEndpoint      string `json:"activeDirectoryEndpoint"`
+	GalleryEndpoint              string `json:"galleryEndpoint"`
+	KeyVaultEndpoint             string `json:"keyVaultEndpoint"`
+	GraphEndpoint                string `json:"graphEndpoint"`
+	StorageEndpointSuffix        string `json:"storageEndpointSuffix"`
+	SQLDatabaseDNSSuffix         string `json:"sqlDatabaseDNSSuffix"`
+	TrafficManagerDNSSuffix      string `json:"trafficManagerDNSSuffix"`
+	KeyVaultDNSSuffix            string `json:"keyVaultDNSSuffix"`
+	ServiceBusEndpointSuffix     string `json:"serviceBusEndpointSuffix"`
+	ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
+	ResourceManagerVMDNSSuffix   string `json:"resourceManagerVMDNSSuffix"`
+}
    +
+// Well-known Azure cloud environments. Endpoint values are fixed,
+// publicly documented Azure URLs.
+var (
+	// PublicCloud is the default public Azure cloud environment
+	PublicCloud = Environment{
+		Name:                         "AzurePublicCloud",
+		ManagementPortalURL:          "https://manage.windowsazure.com/",
+		PublishSettingsURL:           "https://manage.windowsazure.com/publishsettings/index",
+		ServiceManagementEndpoint:    "https://management.core.windows.net/",
+		ResourceManagerEndpoint:      "https://management.azure.com/",
+		ActiveDirectoryEndpoint:      "https://login.microsoftonline.com/",
+		GalleryEndpoint:              "https://gallery.azure.com/",
+		KeyVaultEndpoint:             "https://vault.azure.net/",
+		GraphEndpoint:                "https://graph.windows.net/",
+		StorageEndpointSuffix:        "core.windows.net",
+		SQLDatabaseDNSSuffix:         "database.windows.net",
+		TrafficManagerDNSSuffix:      "trafficmanager.net",
+		KeyVaultDNSSuffix:            "vault.azure.net",
+		ServiceBusEndpointSuffix:     "servicebus.azure.com",
+		ServiceManagementVMDNSSuffix: "cloudapp.net",
+		ResourceManagerVMDNSSuffix:   "cloudapp.azure.com",
+	}
+
+	// USGovernmentCloud is the cloud environment for the US Government
+	USGovernmentCloud = Environment{
+		Name:                         "AzureUSGovernmentCloud",
+		ManagementPortalURL:          "https://manage.windowsazure.us/",
+		PublishSettingsURL:           "https://manage.windowsazure.us/publishsettings/index",
+		ServiceManagementEndpoint:    "https://management.core.usgovcloudapi.net/",
+		ResourceManagerEndpoint:      "https://management.usgovcloudapi.net/",
+		ActiveDirectoryEndpoint:      "https://login.microsoftonline.com/",
+		GalleryEndpoint:              "https://gallery.usgovcloudapi.net/",
+		KeyVaultEndpoint:             "https://vault.usgovcloudapi.net/",
+		GraphEndpoint:                "https://graph.usgovcloudapi.net/",
+		StorageEndpointSuffix:        "core.usgovcloudapi.net",
+		SQLDatabaseDNSSuffix:         "database.usgovcloudapi.net",
+		TrafficManagerDNSSuffix:      "usgovtrafficmanager.net",
+		KeyVaultDNSSuffix:            "vault.usgovcloudapi.net",
+		ServiceBusEndpointSuffix:     "servicebus.usgovcloudapi.net",
+		ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
+		ResourceManagerVMDNSSuffix:   "cloudapp.windowsazure.us",
+	}
+
+	// ChinaCloud is the cloud environment operated in China
+	ChinaCloud = Environment{
+		Name:                         "AzureChinaCloud",
+		ManagementPortalURL:          "https://manage.chinacloudapi.com/",
+		PublishSettingsURL:           "https://manage.chinacloudapi.com/publishsettings/index",
+		ServiceManagementEndpoint:    "https://management.core.chinacloudapi.cn/",
+		ResourceManagerEndpoint:      "https://management.chinacloudapi.cn/",
+		ActiveDirectoryEndpoint:      "https://login.chinacloudapi.cn/?api-version=1.0",
+		GalleryEndpoint:              "https://gallery.chinacloudapi.cn/",
+		KeyVaultEndpoint:             "https://vault.azure.cn/",
+		GraphEndpoint:                "https://graph.chinacloudapi.cn/",
+		StorageEndpointSuffix:        "core.chinacloudapi.cn",
+		SQLDatabaseDNSSuffix:         "database.chinacloudapi.cn",
+		TrafficManagerDNSSuffix:      "trafficmanager.cn",
+		KeyVaultDNSSuffix:            "vault.azure.cn",
+		ServiceBusEndpointSuffix:     "servicebus.chinacloudapi.net",
+		ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
+		ResourceManagerVMDNSSuffix:   "cloudapp.azure.cn",
+	}
+
+	// GermanCloud is the cloud environment operated in Germany
+	GermanCloud = Environment{
+		Name:                         "AzureGermanCloud",
+		ManagementPortalURL:          "http://portal.microsoftazure.de/",
+		PublishSettingsURL:           "https://manage.microsoftazure.de/publishsettings/index",
+		ServiceManagementEndpoint:    "https://management.core.cloudapi.de/",
+		ResourceManagerEndpoint:      "https://management.microsoftazure.de/",
+		ActiveDirectoryEndpoint:      "https://login.microsoftonline.de/",
+		GalleryEndpoint:              "https://gallery.cloudapi.de/",
+		KeyVaultEndpoint:             "https://vault.microsoftazure.de/",
+		GraphEndpoint:                "https://graph.cloudapi.de/",
+		StorageEndpointSuffix:        "core.cloudapi.de",
+		SQLDatabaseDNSSuffix:         "database.cloudapi.de",
+		TrafficManagerDNSSuffix:      "azuretrafficmanager.de",
+		KeyVaultDNSSuffix:            "vault.microsoftazure.de",
+		ServiceBusEndpointSuffix:     "servicebus.cloudapi.de",
+		ServiceManagementVMDNSSuffix: "azurecloudapp.de",
+		ResourceManagerVMDNSSuffix:   "cloudapp.microsoftazure.de",
+	}
+)
    +
+// EnvironmentFromName returns an Environment based on the common name specified.
+// Matching is case-insensitive; an unknown name yields the zero Environment
+// and a descriptive error.
+func EnvironmentFromName(name string) (Environment, error) {
+	name = strings.ToUpper(name)
+	env, ok := environments[name]
+	if !ok {
+		return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
+	}
+	return env, nil
+}
    +
+// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls.
+// Each endpoint is built as
+// <ActiveDirectoryEndpoint>/<tenantID>/oauth2/{authorize|token|devicecode}?api-version=1.0.
+func OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) — see receiver below.
+func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) {
+	template := "%s/oauth2/%s?api-version=%s"
+	u, err := url.Parse(env.ActiveDirectoryEndpoint)
+	if err != nil {
+		return nil, err
+	}
+	authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion))
+	if err != nil {
+		return nil, err
+	}
+	tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion))
+	if err != nil {
+		return nil, err
+	}
+	deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion))
+	if err != nil {
+		return nil, err
+	}
+
+	return &OAuthConfig{
+		AuthorizeEndpoint:  *authorizeURL,
+		TokenEndpoint:      *tokenURL,
+		DeviceCodeEndpoint: *deviceCodeURL,
+	}, nil
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go
    new file mode 100644
    index 0000000..d5cf62d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go
    @@ -0,0 +1,59 @@
    +package azure
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"io/ioutil"
    +	"os"
    +	"path/filepath"
    +)
    +
+// LoadToken restores a Token object from a file located at 'path'.
+// It returns an error if the file cannot be opened or its contents are not
+// valid JSON for a Token.
+func LoadToken(path string) (*Token, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+	}
+	defer file.Close()
+
+	var token Token
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&token); err != nil {
+		return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+	}
+	return &token, nil
+}
    +
+// SaveToken persists an oauth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that maybe accessed by multiple processes. Parent directories are created
+// as needed, and the final file's permissions are set to mode.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+	dir := filepath.Dir(path)
+	err := os.MkdirAll(dir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+	}
+
+	// Write to a temp file in the same directory so the final rename is atomic.
+	newFile, err := ioutil.TempFile(dir, "token")
+	if err != nil {
+		return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+	}
+	tempPath := newFile.Name()
+
+	if err := json.NewEncoder(newFile).Encode(token); err != nil {
+		return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+	}
+	if err := newFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+	}
+
+	// Atomic replace to avoid multi-writer file corruptions
+	if err := os.Rename(tempPath, path); err != nil {
+		return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+	}
+	if err := os.Chmod(path, mode); err != nil {
+		return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+	}
+	return nil
+}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/token.go
    new file mode 100644
    index 0000000..db9a8fa
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/azure/token.go
    @@ -0,0 +1,363 @@
    +package azure
    +
    +import (
    +	"crypto/rand"
    +	"crypto/rsa"
    +	"crypto/sha1"
    +	"crypto/x509"
    +	"encoding/base64"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +	"strconv"
    +	"time"
    +
    +	"github.com/Azure/go-autorest/autorest"
    +	"github.com/dgrijalva/jwt-go"
    +)
    +
const (
	// defaultRefresh is how far before expiry EnsureFresh proactively refreshes.
	defaultRefresh = 5 * time.Minute
	// tokenBaseDate is the Unix epoch in RFC3339 form; Token.ExpiresOn is
	// interpreted as seconds offset from this base (see Token.Expires).
	tokenBaseDate  = "1970-01-01T00:00:00Z"

	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
	OAuthGrantTypeDeviceCode = "device_code"

	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
	OAuthGrantTypeClientCredentials = "client_credentials"

	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
	OAuthGrantTypeRefreshToken = "refresh_token"
)

// expirationBase caches the parsed tokenBaseDate (the Unix epoch).
var expirationBase time.Time

func init() {
	// The parse error is ignored: tokenBaseDate is a compile-time constant in
	// valid RFC3339 form, so parsing cannot fail here.
	expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate)
}
    +
// TokenRefreshCallback is the type representing callbacks that will be called after
// a successful token refresh
type TokenRefreshCallback func(Token) error

// Token encapsulates the access token used to authorize Azure requests.
type Token struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`

	// ExpiresIn and NotBefore are carried verbatim from the token endpoint
	// response; ExpiresOn is read by Expires() as integer seconds since the
	// Unix epoch.
	ExpiresIn string `json:"expires_in"`
	ExpiresOn string `json:"expires_on"`
	NotBefore string `json:"not_before"`

	// Resource is the audience the token was issued for; Type is the
	// "token_type" value (used alongside the bearer Authorization header).
	Resource string `json:"resource"`
	Type     string `json:"token_type"`
}
    +
    +// Expires returns the time.Time when the Token expires.
    +func (t Token) Expires() time.Time {
    +	s, err := strconv.Atoi(t.ExpiresOn)
    +	if err != nil {
    +		s = -3600
    +	}
    +	return expirationBase.Add(time.Duration(s) * time.Second).UTC()
    +}
    +
    +// IsExpired returns true if the Token is expired, false otherwise.
    +func (t Token) IsExpired() bool {
    +	return t.WillExpireIn(0)
    +}
    +
    +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
    +// from now, false otherwise.
    +func (t Token) WillExpireIn(d time.Duration) bool {
    +	return !t.Expires().After(time.Now().Add(d))
    +}
    +
    +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
    +// value is "Bearer " followed by the AccessToken of the Token.
    +func (t *Token) WithAuthorization() autorest.PrepareDecorator {
    +	return func(p autorest.Preparer) autorest.Preparer {
    +		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r)
    +		})
    +	}
    +}
    +
    +// ServicePrincipalNoSecret represents a secret type that contains no secret
    +// meaning it is not valid for fetching a fresh token. This is used by Manual
    +type ServicePrincipalNoSecret struct {
    +}
    +
    +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
    +// It only returns an error for the ServicePrincipalNoSecret type
    +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
    +	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token.")
    +}
    +
// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form
// that is submitted when acquiring an oAuth token. Implementations add their
// credential material (client_secret, signed JWT, ...) to the url.Values that
// refreshInternal posts to the token endpoint.
type ServicePrincipalSecret interface {
	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
}
    +
    +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
    +type ServicePrincipalTokenSecret struct {
    +	ClientSecret string
    +}
    +
    +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
    +// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
    +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
    +	v.Set("client_secret", tokenSecret.ClientSecret)
    +	return nil
    +}
    +
// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
// The certificate supplies the x5t thumbprint header and the private key signs
// the client-assertion JWT (see SignJwt).
type ServicePrincipalCertificateSecret struct {
	Certificate *x509.Certificate
	PrivateKey  *rsa.PrivateKey
}
    +
    +// SignJwt returns the JWT signed with the certificate's private key.
    +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
    +	hasher := sha1.New()
    +	_, err := hasher.Write(secret.Certificate.Raw)
    +	if err != nil {
    +		return "", err
    +	}
    +
    +	thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
    +
    +	// The jti (JWT ID) claim provides a unique identifier for the JWT.
    +	jti := make([]byte, 20)
    +	_, err = rand.Read(jti)
    +	if err != nil {
    +		return "", err
    +	}
    +
    +	token := jwt.New(jwt.SigningMethodRS256)
    +	token.Header["x5t"] = thumbprint
    +	token.Claims = jwt.MapClaims{
    +		"aud": spt.oauthConfig.TokenEndpoint,
    +		"iss": spt.clientID,
    +		"sub": spt.clientID,
    +		"jti": base64.URLEncoding.EncodeToString(jti),
    +		"nbf": time.Now().Unix(),
    +		"exp": time.Now().Add(time.Hour * 24).Unix(),
    +	}
    +
    +	signedString, err := token.SignedString(secret.PrivateKey)
    +	return signedString, err
    +}
    +
    +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
    +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
    +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
    +	jwt, err := secret.SignJwt(spt)
    +	if err != nil {
    +		return err
    +	}
    +
    +	v.Set("client_assertion", jwt)
    +	v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
    +	return nil
    +}
    +
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
	Token

	// secret supplies credential material when a refresh is needed.
	secret        ServicePrincipalSecret
	// oauthConfig provides the token endpoint used by refreshInternal.
	oauthConfig   OAuthConfig
	clientID      string
	resource      string
	// autoRefresh controls whether WithAuthorization calls EnsureFresh.
	autoRefresh   bool
	// refreshWithin is the window before expiry within which EnsureFresh refreshes.
	refreshWithin time.Duration
	// sender issues the token-refresh HTTP request (default: plain http.Client).
	sender        autorest.Sender

	// refreshCallbacks are invoked, in order, after each successful refresh.
	refreshCallbacks []TokenRefreshCallback
}
    +
    +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
    +func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
    +	spt := &ServicePrincipalToken{
    +		oauthConfig:      oauthConfig,
    +		secret:           secret,
    +		clientID:         id,
    +		resource:         resource,
    +		autoRefresh:      true,
    +		refreshWithin:    defaultRefresh,
    +		sender:           &http.Client{},
    +		refreshCallbacks: callbacks,
    +	}
    +	return spt, nil
    +}
    +
    +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
    +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
    +	spt, err := NewServicePrincipalTokenWithSecret(
    +		oauthConfig,
    +		clientID,
    +		resource,
    +		&ServicePrincipalNoSecret{},
    +		callbacks...)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	spt.Token = token
    +
    +	return spt, nil
    +}
    +
    +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
    +// credentials scoped to the named resource.
    +func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
    +	return NewServicePrincipalTokenWithSecret(
    +		oauthConfig,
    +		clientID,
    +		resource,
    +		&ServicePrincipalTokenSecret{
    +			ClientSecret: secret,
    +		},
    +		callbacks...,
    +	)
    +}
    +
    +// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes.
    +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
    +	return NewServicePrincipalTokenWithSecret(
    +		oauthConfig,
    +		clientID,
    +		resource,
    +		&ServicePrincipalCertificateSecret{
    +			PrivateKey:  privateKey,
    +			Certificate: certificate,
    +		},
    +		callbacks...,
    +	)
    +}
    +
    +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
    +// RefreshWithin).
    +func (spt *ServicePrincipalToken) EnsureFresh() error {
    +	if spt.WillExpireIn(spt.refreshWithin) {
    +		return spt.Refresh()
    +	}
    +	return nil
    +}
    +
    +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
    +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
    +	if spt.refreshCallbacks != nil {
    +		for _, callback := range spt.refreshCallbacks {
    +			err := callback(spt.Token)
    +			if err != nil {
    +				return autorest.NewErrorWithError(err,
    +					"azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error")
    +			}
    +		}
    +	}
    +	return nil
    +}
    +
// Refresh obtains a fresh token for the Service Principal.
// It refreshes against the resource the SPT was created for.
func (spt *ServicePrincipalToken) Refresh() error {
	return spt.refreshInternal(spt.resource)
}

// RefreshExchange refreshes the token, but for a different resource.
// The SPT's stored resource is left unchanged.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
	return spt.refreshInternal(resource)
}
    +
    +func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
    +	v := url.Values{}
    +	v.Set("client_id", spt.clientID)
    +	v.Set("resource", resource)
    +
    +	if spt.RefreshToken != "" {
    +		v.Set("grant_type", OAuthGrantTypeRefreshToken)
    +		v.Set("refresh_token", spt.RefreshToken)
    +	} else {
    +		v.Set("grant_type", OAuthGrantTypeClientCredentials)
    +		err := spt.secret.SetAuthenticationValues(spt, &v)
    +		if err != nil {
    +			return err
    +		}
    +	}
    +
    +	req, _ := autorest.Prepare(&http.Request{},
    +		autorest.AsPost(),
    +		autorest.AsFormURLEncoded(),
    +		autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()),
    +		autorest.WithFormData(v))
    +
    +	resp, err := autorest.SendWithSender(spt.sender, req)
    +	if err != nil {
    +		return autorest.NewErrorWithError(err,
    +			"azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s",
    +			spt.clientID)
    +	}
    +
    +	var newToken Token
    +	err = autorest.Respond(resp,
    +		autorest.WithErrorUnlessOK(),
    +		autorest.ByUnmarshallingJSON(&newToken),
    +		autorest.ByClosing())
    +	if err != nil {
    +		return autorest.NewErrorWithError(err,
    +			"azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request",
    +			spt.clientID)
    +	}
    +
    +	spt.Token = newToken
    +
    +	err = spt.InvokeRefreshCallbacks(newToken)
    +	if err != nil {
    +		// its already wrapped inside InvokeRefreshCallbacks
    +		return err
    +	}
    +
    +	return nil
    +}
    +
    +// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
    +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
    +	spt.autoRefresh = autoRefresh
    +}
    +
    +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
    +// refresh the token.
    +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
    +	spt.refreshWithin = d
    +	return
    +}
    +
    +// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An
    +// undecorated http.Client is used by default.
    +func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) {
    +	spt.sender = s
    +}
    +
    +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
    +// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken.
    +//
    +// By default, the token will automatically refresh if nearly expired (as determined by the
    +// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing
    +// tokens.
    +func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator {
    +	return func(p autorest.Preparer) autorest.Preparer {
    +		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			if spt.autoRefresh {
    +				err := spt.EnsureFresh()
    +				if err != nil {
    +					return r, autorest.NewErrorWithError(err,
    +						"azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s",
    +						r.URL)
    +				}
    +			}
    +			return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r)
    +		})
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/client.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/client.go
    new file mode 100644
    index 0000000..b55b3d1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/client.go
    @@ -0,0 +1,212 @@
    +package autorest
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"log"
    +	"net/http"
    +	"net/http/cookiejar"
    +	"time"
    +)
    +
const (
	// DefaultPollingDelay is a reasonable delay between polling requests.
	DefaultPollingDelay = 60 * time.Second

	// DefaultPollingDuration is a reasonable total polling duration.
	DefaultPollingDuration = 15 * time.Minute

	// DefaultRetryAttempts is number of attempts for retry status codes (5xx).
	DefaultRetryAttempts = 3
)

// statusCodesForRetry are the HTTP status codes Client.Do retries on
// (see DoRetryForStatusCodes in Do).
var statusCodesForRetry = []int{
	http.StatusRequestTimeout,      // 408
	http.StatusInternalServerError, // 500
	http.StatusBadGateway,          // 502
	http.StatusServiceUnavailable,  // 503
	http.StatusGatewayTimeout,      // 504
}

// requestFormat/responseFormat frame the dumps emitted by LoggingInspector.
const (
	requestFormat = `HTTP Request Begin ===================================================
%s
===================================================== HTTP Request End
`
	responseFormat = `HTTP Response Begin ===================================================
%s
===================================================== HTTP Response End
`
)
    +
// Response serves as the base for all responses from generated clients. It provides access to the
// last http.Response.
type Response struct {
	*http.Response `json:"-"`
}

// LoggingInspector implements request and response inspectors that log the full request and
// response to a supplied log. See WithInspection and ByInspecting.
type LoggingInspector struct {
	// Logger receives the framed request/response dumps.
	Logger *log.Logger
}
    +
    +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
    +// body is restored after being emitted.
    +//
    +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
    +// important. It is best used to trace JSON or similar body values.
    +func (li LoggingInspector) WithInspection() PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			var body, b bytes.Buffer
    +
    +			defer r.Body.Close()
    +
    +			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
    +			if err := r.Write(&b); err != nil {
    +				return nil, fmt.Errorf("Failed to write response: %v", err)
    +			}
    +
    +			li.Logger.Printf(requestFormat, b.String())
    +
    +			r.Body = ioutil.NopCloser(&body)
    +			return p.Prepare(r)
    +		})
    +	}
    +}
    +
    +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
    +// body is restored after being emitted.
    +//
    +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
    +// important. It is best used to trace JSON or similar body values.
    +func (li LoggingInspector) ByInspecting() RespondDecorator {
    +	return func(r Responder) Responder {
    +		return ResponderFunc(func(resp *http.Response) error {
    +			var body, b bytes.Buffer
    +			defer resp.Body.Close()
    +			resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
    +			if err := resp.Write(&b); err != nil {
    +				return fmt.Errorf("Failed to write response: %v", err)
    +			}
    +
    +			li.Logger.Printf(responseFormat, b.String())
    +
    +			resp.Body = ioutil.NopCloser(&body)
    +			return r.Respond(resp)
    +		})
    +	}
    +}
    +
// Client is the base for autorest generated clients. It provides default, "do nothing"
// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
// standard, undecorated http.Client as a default Sender.
//
// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
// return responses that compose with Response.
//
// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
// sending the request by providing a decorated Sender.
type Client struct {
	// Authorizer decorates requests; nil defaults to NullAuthorizer (see authorizer).
	Authorizer        Authorizer
	// Sender issues requests; nil defaults to a cookie-jar http.Client (see sender).
	Sender            Sender
	// RequestInspector, if set, is applied to each request prepared in Do.
	RequestInspector  PrepareDecorator
	// ResponseInspector, if set, is applied to each response received in Do.
	ResponseInspector RespondDecorator

	// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
	PollingDelay time.Duration

	// PollingDuration sets the maximum polling time after which an error is returned.
	PollingDuration time.Duration

	// RetryAttempts sets the default number of retry attempts for client.
	RetryAttempts int

	// RetryDuration sets the delay duration for retries.
	RetryDuration time.Duration

	// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
	// through the Do method.
	UserAgent string

	// Jar holds cookies; NOTE(review): not consulted by sender(), which builds
	// its own jar when Sender is nil — confirm whether this field is wired up elsewhere.
	Jar http.CookieJar
}
    +
    +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
    +// string.
    +func NewClientWithUserAgent(ua string) Client {
    +	return Client{
    +		PollingDelay:    DefaultPollingDelay,
    +		PollingDuration: DefaultPollingDuration,
    +		RetryAttempts:   DefaultRetryAttempts,
    +		RetryDuration:   30 * time.Second,
    +		UserAgent:       ua,
    +	}
    +}
    +
// Do implements the Sender interface by invoking the active Sender after applying authorization.
// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
// is set, apply set the User-Agent header.
func (c Client) Do(r *http.Request) (*http.Response, error) {
	if r.UserAgent() == "" {
		// Best-effort: a failure setting the User-Agent is deliberately ignored.
		r, _ = Prepare(r,
			WithUserAgent(c.UserAgent))
	}
	// Inspection runs before authorization so the authorized form is what is sent.
	r, err := Prepare(r,
		c.WithInspection(),
		c.WithAuthorization())
	if err != nil {
		return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
	}
	// Retries are limited to the codes in statusCodesForRetry (408 and 5xx).
	resp, err := SendWithSender(c.sender(), r,
		DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...))
	// NOTE(review): the error from response inspection is discarded; only the
	// send error reaches the caller — confirm this is intended.
	Respond(resp,
		c.ByInspecting())
	return resp, err
}
    +
    +// sender returns the Sender to which to send requests.
    +func (c Client) sender() Sender {
    +	if c.Sender == nil {
    +		j, _ := cookiejar.New(nil)
    +		return &http.Client{Jar: j}
    +	}
    +	return c.Sender
    +}
    +
    +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
    +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer.
    +func (c Client) WithAuthorization() PrepareDecorator {
    +	return c.authorizer().WithAuthorization()
    +}
    +
    +// authorizer returns the Authorizer to use.
    +func (c Client) authorizer() Authorizer {
    +	if c.Authorizer == nil {
    +		return NullAuthorizer{}
    +	}
    +	return c.Authorizer
    +}
    +
    +// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
    +// if present, or returns the WithNothing PrepareDecorator otherwise.
    +func (c Client) WithInspection() PrepareDecorator {
    +	if c.RequestInspector == nil {
    +		return WithNothing()
    +	}
    +	return c.RequestInspector
    +}
    +
    +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
    +// if present, or returns the ByIgnoring RespondDecorator otherwise.
    +func (c Client) ByInspecting() RespondDecorator {
    +	if c.ResponseInspector == nil {
    +		return ByIgnoring()
    +	}
    +	return c.ResponseInspector
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/date.go
    new file mode 100644
    index 0000000..80ca60e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/date.go
    @@ -0,0 +1,82 @@
    +/*
    +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
    +defined date   formats: Date and DateTime. Both types may, in most cases, be used in lieu of
    +time.Time types. And both convert to time.Time through a ToTime method.
    +*/
    +package date
    +
    +import (
    +	"fmt"
    +	"time"
    +)
    +
    +const (
    +	fullDate     = "2006-01-02"
    +	fullDateJSON = `"2006-01-02"`
    +	dateFormat   = "%04d-%02d-%02d"
    +	jsonFormat   = `"%04d-%02d-%02d"`
    +)
    +
    +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
    +// 2006-01-02).
    +type Date struct {
    +	time.Time
    +}
    +
    +// ParseDate create a new Date from the passed string.
    +func ParseDate(date string) (d Date, err error) {
    +	return parseDate(date, fullDate)
    +}
    +
    +func parseDate(date string, format string) (Date, error) {
    +	d, err := time.Parse(format, date)
    +	return Date{Time: d}, err
    +}
    +
// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02). Delegates to MarshalText.
func (d Date) MarshalBinary() ([]byte, error) {
	return d.MarshalText()
}

// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02). Delegates to UnmarshalText.
func (d *Date) UnmarshalBinary(data []byte) error {
	return d.UnmarshalText(data)
}

// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02). The error result is always nil.
func (d Date) MarshalJSON() (json []byte, err error) {
	return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
}

// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02). The quotes are part of the expected layout (fullDateJSON).
func (d *Date) UnmarshalJSON(data []byte) (err error) {
	d.Time, err = time.Parse(fullDateJSON, string(data))
	return err
}

// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalText() (text []byte, err error) {
	return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
}

// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalText(data []byte) (err error) {
	d.Time, err = time.Parse(fullDate, string(data))
	return err
}

// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
func (d Date) String() string {
	return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
}

// ToTime returns a Date as a time.Time
func (d Date) ToTime() time.Time {
	return d.Time
}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/time.go
    new file mode 100644
    index 0000000..c1af629
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/time.go
    @@ -0,0 +1,89 @@
    +package date
    +
    +import (
    +	"regexp"
    +	"time"
    +)
    +
// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
const (
	// Layouts without a zone suffix, used when no offset marker is detected.
	azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"`
	azureUtcFormat     = "2006-01-02T15:04:05.999999999"
	// Full RFC3339 layouts, used when an offset/Z suffix is present.
	rfc3339JSON        = `"` + time.RFC3339Nano + `"`
	rfc3339            = time.RFC3339Nano
	// tzOffsetRegex matches a trailing Z/z or +hh:mm / -hh:mm offset.
	tzOffsetRegex      = `(Z|z|\+|-)(\d+:\d+)*"*$`
)

// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z).
type Time struct {
	time.Time
}
    +
// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z). Delegates to the embedded time.Time.
func (t Time) MarshalBinary() ([]byte, error) {
	return t.Time.MarshalText()
}

// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
// (i.e., 2006-01-02T15:04:05Z). Delegates to UnmarshalText (tolerates missing zone suffix).
func (t *Time) UnmarshalBinary(data []byte) error {
	return t.UnmarshalText(data)
}

// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e.,
// 2006-01-02T15:04:05Z). Delegates to the embedded time.Time.
func (t Time) MarshalJSON() (json []byte, err error) {
	return t.Time.MarshalJSON()
}
    +
    +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time
    +// (i.e., 2006-01-02T15:04:05Z).
    +func (t *Time) UnmarshalJSON(data []byte) (err error) {
    +	timeFormat := azureUtcFormatJSON
    +	match, err := regexp.Match(tzOffsetRegex, data)
    +	if err != nil {
    +		return err
    +	} else if match {
    +		timeFormat = rfc3339JSON
    +	}
    +	t.Time, err = ParseTime(timeFormat, string(data))
    +	return err
    +}
    +
    +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
    +// 2006-01-02T15:04:05Z).
    +func (t Time) MarshalText() (text []byte, err error) {
    +	return t.Time.MarshalText()
    +}
    +
    +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
    +// (i.e., 2006-01-02T15:04:05Z).
    +func (t *Time) UnmarshalText(data []byte) (err error) {
    +	timeFormat := azureUtcFormat
    +	match, err := regexp.Match(tzOffsetRegex, data)
    +	if err != nil {
    +		return err
    +	} else if match {
    +		timeFormat = rfc3339
    +	}
    +	t.Time, err = ParseTime(timeFormat, string(data))
    +	return err
    +}
    +
    +// String returns the Time formatted as an RFC3339 date-time string (i.e.,
    +// 2006-01-02T15:04:05Z).
    +func (t Time) String() string {
    +	// Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does.
    +	b, err := t.MarshalText()
    +	if err != nil {
    +		return ""
    +	}
    +	return string(b)
    +}
    +
    +// ToTime returns a Time as a time.Time
    +func (t Time) ToTime() time.Time {
    +	return t.Time
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
    new file mode 100644
    index 0000000..11995fb
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
    @@ -0,0 +1,86 @@
    +package date
    +
    +import (
    +	"errors"
    +	"time"
    +)
    +
const (
	// rfc1123JSON wraps the RFC1123 layout in quotes for JSON string values.
	rfc1123JSON = `"` + time.RFC1123 + `"`
	rfc1123     = time.RFC1123
)

// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e.,
// Mon, 02 Jan 2006 15:04:05 MST).
type TimeRFC1123 struct {
	time.Time
}
    +
    +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time
    +// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
    +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) {
    +	t.Time, err = ParseTime(rfc1123JSON, string(data))
    +	if err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e.,
    +// Mon, 02 Jan 2006 15:04:05 MST).
    +func (t TimeRFC1123) MarshalJSON() ([]byte, error) {
    +	if y := t.Year(); y < 0 || y >= 10000 {
    +		return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
    +	}
    +	b := []byte(t.Format(rfc1123JSON))
    +	return b, nil
    +}
    +
    +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
    +// Mon, 02 Jan 2006 15:04:05 MST).
    +func (t TimeRFC1123) MarshalText() ([]byte, error) {
    +	if y := t.Year(); y < 0 || y >= 10000 {
    +		return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
    +	}
    +
    +	b := []byte(t.Format(rfc1123))
    +	return b, nil
    +}
    +
    +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
    +// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
    +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
    +	t.Time, err = ParseTime(rfc1123, string(data))
    +	if err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
    +// Mon, 02 Jan 2006 15:04:05 MST).
    +func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
    +	return t.MarshalText()
    +}
    +
    +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
    +// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
    +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
    +	return t.UnmarshalText(data)
    +}
    +
    +// ToTime returns a Time as a time.Time
    +func (t TimeRFC1123) ToTime() time.Time {
    +	return t.Time
    +}
    +
    +// String returns the Time formatted as an RFC1123 date-time string (i.e.,
    +// Mon, 02 Jan 2006 15:04:05 MST).
    +func (t TimeRFC1123) String() string {
    +	// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
    +	b, err := t.MarshalText()
    +	if err != nil {
    +		return ""
    +	}
    +	return string(b)
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
    new file mode 100644
    index 0000000..207b1a2
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
    @@ -0,0 +1,11 @@
    +package date
    +
    +import (
    +	"strings"
    +	"time"
    +)
    +
    +// ParseTime to parse Time string to specified format.
    +func ParseTime(format string, t string) (d time.Time, err error) {
    +	return time.Parse(format, strings.ToUpper(t))
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/error.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/error.go
    new file mode 100644
    index 0000000..2e4fc79
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/error.go
    @@ -0,0 +1,77 @@
    +package autorest
    +
    +import (
    +	"fmt"
    +	"net/http"
    +)
    +
    +const (
    +	// UndefinedStatusCode is used when HTTP status code is not available for an error.
    +	UndefinedStatusCode = 0
    +)
    +
    +// DetailedError encloses a error with details of the package, method, and associated HTTP
    +// status code (if any).
    +type DetailedError struct {
    +	Original error
    +
    +	// PackageType is the package type of the object emitting the error. For types, the value
    +	// matches that produced the the '%T' format specifier of the fmt package. For other elements,
    +	// such as functions, it is just the package name (e.g., "autorest").
    +	PackageType string
    +
    +	// Method is the name of the method raising the error.
    +	Method string
    +
    +	// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
    +	StatusCode interface{}
    +
    +	// Message is the error message.
    +	Message string
    +}
    +
    +// NewError creates a new Error conforming object from the passed packageType, method, and
    +// message. message is treated as a format string to which the optional args apply.
    +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
    +	return NewErrorWithError(nil, packageType, method, nil, message, args...)
    +}
    +
    +// NewErrorWithResponse creates a new Error conforming object from the passed
    +// packageType, method, statusCode of the given resp (UndefinedStatusCode if
    +// resp is nil), and message. message is treated as a format string to which the
    +// optional args apply.
    +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
    +	return NewErrorWithError(nil, packageType, method, resp, message, args...)
    +}
    +
    +// NewErrorWithError creates a new Error conforming object from the
    +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
    +// if resp is nil), message, and original error. message is treated as a format
    +// string to which the optional args apply.
    +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
    +	if v, ok := original.(DetailedError); ok {
    +		return v
    +	}
    +
    +	statusCode := UndefinedStatusCode
    +	if resp != nil {
    +		statusCode = resp.StatusCode
    +	}
    +
    +	return DetailedError{
    +		Original:    original,
    +		PackageType: packageType,
    +		Method:      method,
    +		StatusCode:  statusCode,
    +		Message:     fmt.Sprintf(message, args...),
    +	}
    +}
    +
    +// Error returns a formatted containing all available details (i.e., PackageType, Method,
    +// StatusCode, Message, and original error (if any)).
    +func (e DetailedError) Error() string {
    +	if e.Original == nil {
    +		return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
    +	}
    +	return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/preparer.go
    new file mode 100644
    index 0000000..5b2c527
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/preparer.go
    @@ -0,0 +1,433 @@
    +package autorest
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"mime/multipart"
    +	"net/http"
    +	"net/url"
    +	"strings"
    +)
    +
const (
	// MIME types applied by the AsJSON and AsFormURLEncoded decorators below.
	mimeTypeJSON     = "application/json"
	mimeTypeFormPost = "application/x-www-form-urlencoded"

	// Canonical names of the HTTP headers written by the With* header decorators.
	headerAuthorization = "Authorization"
	headerContentType   = "Content-Type"
	headerUserAgent     = "User-Agent"
)
    +
// Preparer is the interface that wraps the Prepare method.
//
// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used.
type Preparer interface {
	Prepare(*http.Request) (*http.Request, error)
}

// PreparerFunc is a method that implements the Preparer interface, allowing an
// ordinary function to be used wherever a Preparer is expected.
type PreparerFunc func(*http.Request) (*http.Request, error)

// Prepare implements the Preparer interface on PreparerFunc by invoking the function itself.
func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
	return pf(r)
}
    +
    +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
    +// http.Request and pass it along or, first, pass the http.Request along then affect the result.
    +type PrepareDecorator func(Preparer) Preparer
    +
    +// CreatePreparer creates, decorates, and returns a Preparer.
    +// Without decorators, the returned Preparer returns the passed http.Request unmodified.
    +// Preparers are safe to share and re-use.
    +func CreatePreparer(decorators ...PrepareDecorator) Preparer {
    +	return DecoratePreparer(
    +		Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
    +		decorators...)
    +}
    +
    +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
    +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the
    +// request depends on whether they are a pre-decorator (change the http.Request and then pass it
    +// along) or a post-decorator (pass the http.Request along and alter it on return).
    +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
    +	for _, decorate := range decorators {
    +		p = decorate(p)
    +	}
    +	return p
    +}
    +
// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
// It creates a Preparer from the decorators which it then applies to the passed http.Request.
// A nil request is rejected with a DetailedError instead of being passed to the decorators.
func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
	if r == nil {
		return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
	}
	return CreatePreparer(decorators...).Prepare(r)
}
    +
    +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
    +// http.Request.
    +func WithNothing() PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			return p.Prepare(r)
    +		})
    +	}
    +}
    +
    +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
    +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
    +// adding the header.
    +func WithHeader(header string, value string) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				if r.Header == nil {
    +					r.Header = make(http.Header)
    +				}
    +				r.Header.Set(http.CanonicalHeaderKey(header), value)
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the supplied token.
func WithBearerAuthorization(token string) PrepareDecorator {
	return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
}

// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
// is the passed contentType.
func AsContentType(contentType string) PrepareDecorator {
	return WithHeader(headerContentType, contentType)
}

// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
// passed string.
func WithUserAgent(ua string) PrepareDecorator {
	return WithHeader(headerUserAgent, ua)
}

// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
// "application/x-www-form-urlencoded".
func AsFormURLEncoded() PrepareDecorator {
	return AsContentType(mimeTypeFormPost)
}

// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
// "application/json".
func AsJSON() PrepareDecorator {
	return AsContentType(mimeTypeJSON)
}
    +
    +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
    +// decorator does not validate that the passed method string is a known HTTP method.
    +func WithMethod(method string) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r.Method = method
    +			return p.Prepare(r)
    +		})
    +	}
    +}
    +
// The As<Verb> helpers below are thin wrappers over WithMethod for the standard HTTP verbs.

// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
func AsDelete() PrepareDecorator { return WithMethod("DELETE") }

// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
func AsGet() PrepareDecorator { return WithMethod("GET") }

// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
func AsHead() PrepareDecorator { return WithMethod("HEAD") }

// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }

// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
func AsPatch() PrepareDecorator { return WithMethod("PATCH") }

// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
func AsPost() PrepareDecorator { return WithMethod("POST") }

// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
func AsPut() PrepareDecorator { return WithMethod("PUT") }
    +
    +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
    +// from the supplied baseUrl.
    +func WithBaseURL(baseURL string) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				var u *url.URL
    +				if u, err = url.Parse(baseURL); err != nil {
    +					return r, err
    +				}
    +				if u.Scheme == "" {
    +					err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
    +				}
    +				if err == nil {
    +					r.URL = u
    +				}
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
    +// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the
    +// http.Request body.
    +func WithFormData(v url.Values) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				s := v.Encode()
    +				r.ContentLength = int64(len(s))
    +				r.Body = ioutil.NopCloser(strings.NewReader(s))
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
    +// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters
    +// into the http.Request body.
    +func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				var body bytes.Buffer
    +				writer := multipart.NewWriter(&body)
    +				for key, value := range formDataParameters {
    +					if rc, ok := value.(io.ReadCloser); ok {
    +						var fd io.Writer
    +						if fd, err = writer.CreateFormFile(key, key); err != nil {
    +							return r, err
    +						}
    +						if _, err = io.Copy(fd, rc); err != nil {
    +							return r, err
    +						}
    +					} else {
    +						if err = writer.WriteField(key, ensureValueString(value)); err != nil {
    +							return r, err
    +						}
    +					}
    +				}
    +				if err = writer.Close(); err != nil {
    +					return r, err
    +				}
    +				if r.Header == nil {
    +					r.Header = make(http.Header)
    +				}
    +				r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
    +				r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
    +				r.ContentLength = int64(body.Len())
    +				return r, err
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
    +// WithFile returns a PrepareDecorator that sends file in request body.
    +func WithFile(f io.ReadCloser) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				b, err := ioutil.ReadAll(f)
    +				if err != nil {
    +					return r, err
    +				}
    +				r.Body = ioutil.NopCloser(bytes.NewReader(b))
    +				r.ContentLength = int64(len(b))
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
// The scalar helpers below all funnel through WithString, formatting the value with
// fmt's default %v representation.

// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
// and sets the Content-Length header.
func WithBool(v bool) PrepareDecorator {
	return WithString(fmt.Sprintf("%v", v))
}

// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
// request and sets the Content-Length header.
func WithFloat32(v float32) PrepareDecorator {
	return WithString(fmt.Sprintf("%v", v))
}

// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
// request and sets the Content-Length header.
func WithFloat64(v float64) PrepareDecorator {
	return WithString(fmt.Sprintf("%v", v))
}

// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
// and sets the Content-Length header.
func WithInt32(v int32) PrepareDecorator {
	return WithString(fmt.Sprintf("%v", v))
}

// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
// and sets the Content-Length header.
func WithInt64(v int64) PrepareDecorator {
	return WithString(fmt.Sprintf("%v", v))
}
    +
    +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
    +// and sets the Content-Length header.
    +func WithString(v string) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				r.ContentLength = int64(len(v))
    +				r.Body = ioutil.NopCloser(strings.NewReader(v))
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
    +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
    +// request and sets the Content-Length header.
    +func WithJSON(v interface{}) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				b, err := json.Marshal(v)
    +				if err == nil {
    +					r.ContentLength = int64(len(b))
    +					r.Body = ioutil.NopCloser(bytes.NewReader(b))
    +				}
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
    +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
    +// is absolute (that is, it begins with a "/"), it replaces the existing path.
    +func WithPath(path string) PrepareDecorator {
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				if r.URL == nil {
    +					return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
    +				}
    +				if r.URL, err = parseURL(r.URL, path); err != nil {
    +					return r, err
    +				}
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
// values will be escaped (aka URL encoded) before insertion into the path.
func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
	// Values are stringified and escaped once, at decoration time, not per request.
	parameters := escapeValueStrings(ensureValueStrings(pathParameters))
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				if r.URL == nil {
					return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
				}
				// Substitute every "{key}" token in the template with its escaped value.
				for key, value := range parameters {
					path = strings.Replace(path, "{"+key+"}", value, -1)
				}
				if r.URL, err = parseURL(r.URL, path); err != nil {
					return r, err
				}
			}
			return r, err
		})
	}
}
    +
    +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
    +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
    +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
    +	parameters := ensureValueStrings(pathParameters)
    +	return func(p Preparer) Preparer {
    +		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
    +			r, err := p.Prepare(r)
    +			if err == nil {
    +				if r.URL == nil {
    +					return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
    +				}
    +				for key, value := range parameters {
    +					path = strings.Replace(path, "{"+key+"}", value, -1)
    +				}
    +
    +				if r.URL, err = parseURL(r.URL, path); err != nil {
    +					return r, err
    +				}
    +			}
    +			return r, err
    +		})
    +	}
    +}
    +
    +func parseURL(u *url.URL, path string) (*url.URL, error) {
    +	p := strings.TrimRight(u.String(), "/")
    +	if !strings.HasPrefix(path, "/") {
    +		path = "/" + path
    +	}
    +	return url.Parse(p + path)
    +}
    +
// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
// given in the supplied map (i.e., key=value). Existing query parameters on the request are
// preserved; new ones are added alongside them.
func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
	// Values are stringified once, at decoration time.
	parameters := ensureValueStrings(queryParameters)
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				if r.URL == nil {
					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
				}
				v := r.URL.Query()
				for key, value := range parameters {
					v.Add(key, value)
				}
				// createQuery (defined elsewhere in this package) serializes the values.
				r.URL.RawQuery = createQuery(v)
			}
			return r, err
		})
	}
}
    +
// Authorizer is the interface that provides a PrepareDecorator used to supply request
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
// state of the formed HTTP request.
type Authorizer interface {
	WithAuthorization() PrepareDecorator
}

// NullAuthorizer implements a default, "do nothing" Authorizer.
type NullAuthorizer struct{}

// WithAuthorization returns a PrepareDecorator that does nothing (WithNothing),
// leaving the request unauthenticated.
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
	return WithNothing()
}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/responder.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/responder.go
    new file mode 100644
    index 0000000..e377ad4
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/responder.go
    @@ -0,0 +1,208 @@
    +package autorest
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"encoding/xml"
    +	"fmt"
    +	"io/ioutil"
    +	"net/http"
    +	"strings"
    +)
    +
// Responder is the interface that wraps the Respond method.
//
// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold
// state since Responders may be shared and re-used.
type Responder interface {
	Respond(*http.Response) error
}

// ResponderFunc is a method that implements the Responder interface, allowing an
// ordinary function to be used wherever a Responder is expected.
type ResponderFunc func(*http.Response) error

// Respond implements the Responder interface on ResponderFunc by invoking the function itself.
func (rf ResponderFunc) Respond(r *http.Response) error {
	return rf(r)
}

// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
// the http.Response and pass it along or, first, pass the http.Response along then react.
type RespondDecorator func(Responder) Responder
    +
    +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
    +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
    +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes
    +// the response body is fine to share whereas a decorator that reads the body into a passed struct
    +// is not.
    +//
    +// To prevent memory leaks, ensure that at least one Responder closes the response body.
    +func CreateResponder(decorators ...RespondDecorator) Responder {
    +	return DecorateResponder(
    +		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
    +		decorators...)
    +}
    +
    +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
    +// applies to the Responder. Decorators are applied in the order received, but their affect upon the
    +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it
    +// along) or a post-decorator (pass the http.Response along and then react).
    +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
    +	for _, decorate := range decorators {
    +		r = decorate(r)
    +	}
    +	return r
    +}
    +
// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
// It creates a Responder from the decorators it then applies to the passed http.Response.
// A nil response is a no-op and returns nil.
func Respond(r *http.Response, decorators ...RespondDecorator) error {
	if r == nil {
		return nil
	}
	return CreateResponder(decorators...).Respond(r)
}
    +
    +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined
    +// to the next RespondDecorator.
    +func ByIgnoring() RespondDecorator {
    +	return func(r Responder) Responder {
    +		return ResponderFunc(func(resp *http.Response) error {
    +			return r.Respond(resp)
    +		})
    +	}
    +}
    +
// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
// the Body is read (via TeeReadCloser, defined elsewhere in this package); nothing is
// copied until a later consumer actually reads the body.
func ByCopying(b *bytes.Buffer) RespondDecorator {
	return func(r Responder) Responder {
		return ResponderFunc(func(resp *http.Response) error {
			err := r.Respond(resp)
			// Only wrap when there is a body and the inner Responder succeeded.
			if err == nil && resp != nil && resp.Body != nil {
				resp.Body = TeeReadCloser(resp.Body, b)
			}
			return err
		})
	}
}
    +
    +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
    +// closes the response body. Since the passed Responder is invoked prior to closing the response
    +// body, the decorator may occur anywhere within the set.
    +func ByClosing() RespondDecorator {
    +	return func(r Responder) Responder {
    +		return ResponderFunc(func(resp *http.Response) error {
    +			err := r.Respond(resp)
    +			if resp != nil && resp.Body != nil {
    +				if err := resp.Body.Close(); err != nil {
    +					return fmt.Errorf("Error closing the response body: %v", err)
    +				}
    +			}
    +			return err
    +		})
    +	}
    +}
    +
    +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
    +// it closes the response if the passed Responder returns an error and the response body exists.
    +func ByClosingIfError() RespondDecorator {
    +	return func(r Responder) Responder {
    +		return ResponderFunc(func(resp *http.Response) error {
    +			err := r.Respond(resp)
    +			if err != nil && resp != nil && resp.Body != nil {
    +				if err := resp.Body.Close(); err != nil {
    +					return fmt.Errorf("Error closing the response body: %v", err)
    +				}
    +			}
    +			return err
    +		})
    +	}
    +}
    +
// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
// response Body into the value pointed to by v. The body is fully consumed; bodies that are
// empty (or contain only spaces) are skipped without error.
func ByUnmarshallingJSON(v interface{}) RespondDecorator {
	return func(r Responder) Responder {
		return ResponderFunc(func(resp *http.Response) error {
			err := r.Respond(resp)
			if err == nil {
				b, errInner := ioutil.ReadAll(resp.Body)
				if errInner != nil {
					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
				} else if len(strings.Trim(string(b), " ")) > 0 {
					// NOTE(review): Trim strips only spaces, not tabs/newlines — a body of
					// just "\n" is still passed to json.Unmarshal; confirm this is intended.
					errInner = json.Unmarshal(b, v)
					if errInner != nil {
						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
					}
				}
			}
			return err
		})
	}
}
    +
    +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the
    +// response Body into the value pointed to by v.
    +func ByUnmarshallingXML(v interface{}) RespondDecorator {
    +	return func(r Responder) Responder {
    +		return ResponderFunc(func(resp *http.Response) error {
    +			err := r.Respond(resp)
    +			if err == nil {
    +				b, errInner := ioutil.ReadAll(resp.Body)
    +				if errInner != nil {
    +					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
    +				} else {
    +					errInner = xml.Unmarshal(b, v)
    +					if errInner != nil {
    +						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
    +					}
    +				}
    +			}
    +			return err
    +		})
    +	}
    +}
    +
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
// StatusCode is among the set passed. Since these are artificial errors, the response body
// may still require closing.
func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
	return func(r Responder) Responder {
		return ResponderFunc(func(resp *http.Response) error {
			err := r.Respond(resp)
			// ResponseHasStatusCode is defined elsewhere in this package; the synthetic
			// error carries the request method/URL and the response status line.
			if err == nil && !ResponseHasStatusCode(resp, codes...) {
				err = NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
					resp.Request.Method,
					resp.Request.URL,
					resp.Status)
			}
			return err
		})
	}
}

// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
// anything other than HTTP 200.
func WithErrorUnlessOK() RespondDecorator {
	return WithErrorUnlessStatusCode(http.StatusOK)
}
    +
    +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
    +// empty string slice if the passed http.Response is nil or the header does not exist.
    +func ExtractHeader(header string, resp *http.Response) []string {
    +	if resp != nil && resp.Header != nil {
    +		return resp.Header[http.CanonicalHeaderKey(header)]
    +	}
    +	return nil
    +}
    +
    +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
    +// returns an empty string if the passed http.Response is nil or the header does not exist.
    +func ExtractHeaderValue(header string, resp *http.Response) string {
    +	h := ExtractHeader(header, resp)
    +	if len(h) > 0 {
    +		return h[0]
    +	}
    +	return ""
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/sender.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/sender.go
    new file mode 100644
    index 0000000..93e6489
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/sender.go
    @@ -0,0 +1,267 @@
    +package autorest
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io/ioutil"
    +	"log"
    +	"math"
    +	"net/http"
    +	"time"
    +)
    +
    +// Sender is the interface that wraps the Do method to send HTTP requests.
    +//
    +// The standard http.Client conforms to this interface.
    +type Sender interface {
    +	Do(*http.Request) (*http.Response, error)
    +}
    +
    +// SenderFunc is a method that implements the Sender interface.
    +type SenderFunc func(*http.Request) (*http.Response, error)
    +
    +// Do implements the Sender interface on SenderFunc.
    +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
    +	return sf(r)
    +}
    +
    +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
    +// http.Request and pass it along or, first, pass the http.Request along then react to the
    +// http.Response result.
    +type SendDecorator func(Sender) Sender
    +
    +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
    +func CreateSender(decorators ...SendDecorator) Sender {
    +	return DecorateSender(&http.Client{}, decorators...)
    +}
    +
    +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
    +// the Sender. Decorators are applied in the order received, but their effect upon the request
    +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
    +// post-decorator (pass the http.Request along and react to the results in http.Response).
    +func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
    +	for _, decorate := range decorators {
    +		s = decorate(s)
    +	}
    +	return s
    +}
    +
    +// Send sends, by means of the default http.Client, the passed http.Request, returning the
    +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
    +// it will apply to the http.Client before invoking the Do method.
    +//
    +// Send is a convenience method and not recommended for production. Advanced users should use
    +// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
    +//
    +// Send will not poll or retry requests.
    +func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
    +	return SendWithSender(&http.Client{}, r, decorators...)
    +}
    +
    +// SendWithSender sends the passed http.Request, through the provided Sender, returning the
    +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
    +// it will apply to the http.Client before invoking the Do method.
    +//
    +// SendWithSender will not poll or retry requests.
    +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
    +	return DecorateSender(s, decorators...).Do(r)
    +}
    +
    +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
    +// invoking the Sender. The delay may be terminated by closing the optional channel on the
    +// http.Request. If canceled, no further Senders are invoked.
    +func AfterDelay(d time.Duration) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (*http.Response, error) {
    +			if !DelayForBackoff(d, 1, r.Cancel) {
    +				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
    +			}
    +			return s.Do(r)
    +		})
    +	}
    +}
    +
    +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
    +func AsIs() SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (*http.Response, error) {
    +			return s.Do(r)
    +		})
    +	}
    +}
    +
    +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
    +// it closes the response if the passed Sender returns an error and the response body exists.
    +func DoCloseIfError() SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (*http.Response, error) {
    +			resp, err := s.Do(r)
    +			if err != nil {
    +				Respond(resp, ByClosing())
    +			}
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
    +// among the set passed. Since these are artificial errors, the response body may still require
    +// closing.
    +func DoErrorIfStatusCode(codes ...int) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (*http.Response, error) {
    +			resp, err := s.Do(r)
    +			if err == nil && ResponseHasStatusCode(resp, codes...) {
    +				err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
    +					resp.Request.Method,
    +					resp.Request.URL,
    +					resp.Status)
    +			}
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
    +// StatusCode is among the set passed. Since these are artificial errors, the response body
    +// may still require closing.
    +func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (*http.Response, error) {
    +			resp, err := s.Do(r)
    +			if err == nil && !ResponseHasStatusCode(resp, codes...) {
    +				err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
    +					resp.Request.Method,
    +					resp.Request.URL,
    +					resp.Status)
    +			}
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
    +// passed status codes. It expects the http.Response to contain a Location header providing the
    +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
    +// the supplied duration. It will delay between requests for the duration specified in the
    +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
    +// closing the optional channel on the http.Request.
    +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
    +			resp, err = s.Do(r)
    +
    +			if err == nil && ResponseHasStatusCode(resp, codes...) {
    +				r, err = NewPollingRequest(resp, r.Cancel)
    +
    +				for err == nil && ResponseHasStatusCode(resp, codes...) {
    +					Respond(resp,
    +						ByClosing())
    +					resp, err = SendWithSender(s, r,
    +						AfterDelay(GetRetryAfter(resp, delay)))
    +				}
    +			}
    +
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
    +// number of attempts, exponentially backing off between requests using the supplied backoff
    +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
    +// the http.Request.
    +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
    +			for attempt := 0; attempt < attempts; attempt++ {
    +				resp, err = s.Do(r)
    +				if err == nil {
    +					return resp, err
    +				}
    +				DelayForBackoff(backoff, attempt, r.Cancel)
    +			}
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
    +// number of attempts, exponentially backing off between requests using the supplied backoff
    +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
    +// the http.Request.
    +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
    +			b := []byte{}
    +			if r.Body != nil {
    +				b, err = ioutil.ReadAll(r.Body)
    +				if err != nil {
    +					return resp, err
    +				}
    +			}
    +
    +			// Increment to add the first call (attempts denotes number of retries)
    +			attempts++
    +			for attempt := 0; attempt < attempts; attempt++ {
    +				r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
    +				resp, err = s.Do(r)
    +				if err != nil || !ResponseHasStatusCode(resp, codes...) {
    +					return resp, err
    +				}
    +				DelayForBackoff(backoff, attempt, r.Cancel)
    +			}
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
    +// to or greater than the specified duration, exponentially backing off between requests using the
    +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
    +// optional channel on the http.Request.
    +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
    +			end := time.Now().Add(d)
    +			for attempt := 0; time.Now().Before(end); attempt++ {
    +				resp, err = s.Do(r)
    +				if err == nil {
    +					return resp, err
    +				}
    +				DelayForBackoff(backoff, attempt, r.Cancel)
    +			}
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// WithLogging returns a SendDecorator that implements simple before and after logging of the
    +// request.
    +func WithLogging(logger *log.Logger) SendDecorator {
    +	return func(s Sender) Sender {
    +		return SenderFunc(func(r *http.Request) (*http.Response, error) {
    +			logger.Printf("Sending %s %s", r.Method, r.URL)
    +			resp, err := s.Do(r)
    +			if err != nil {
    +				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
    +			} else {
    +				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
    +			}
    +			return resp, err
    +		})
    +	}
    +}
    +
    +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
    +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be
    +// set to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
    +// returns false.
    +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
    +	select {
    +	case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
    +		return true
    +	case <-cancel:
    +		return false
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
    new file mode 100644
    index 0000000..7b180b8
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
    @@ -0,0 +1,133 @@
    +/*
    +Package to provides helpers to ease working with pointer values of marshalled structures.
    +*/
    +package to
    +
    +// String returns a string value for the passed string pointer. It returns the empty string if the
    +// pointer is nil.
    +func String(s *string) string {
    +	if s != nil {
    +		return *s
    +	}
    +	return ""
    +}
    +
    +// StringPtr returns a pointer to the passed string.
    +func StringPtr(s string) *string {
    +	return &s
    +}
    +
    +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil
    +// slice if the pointer is nil.
    +func StringSlice(s *[]string) []string {
    +	if s != nil {
    +		return *s
    +	}
    +	return nil
    +}
    +
    +// StringSlicePtr returns a pointer to the passed string slice.
    +func StringSlicePtr(s []string) *[]string {
    +	return &s
    +}
    +
    +// StringMap returns a map of strings built from the map of string pointers. The empty string is
    +// used for nil pointers.
    +func StringMap(msp map[string]*string) map[string]string {
    +	ms := make(map[string]string, len(msp))
    +	for k, sp := range msp {
    +		if sp != nil {
    +			ms[k] = *sp
    +		} else {
    +			ms[k] = ""
    +		}
    +	}
    +	return ms
    +}
    +
    +// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
    +func StringMapPtr(ms map[string]string) *map[string]*string {
    +	msp := make(map[string]*string, len(ms))
    +	for k, s := range ms {
    +		msp[k] = StringPtr(s)
    +	}
    +	return &msp
    +}
    +
    +// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
    +func Bool(b *bool) bool {
    +	if b != nil {
    +		return *b
    +	}
    +	return false
    +}
    +
    +// BoolPtr returns a pointer to the passed bool.
    +func BoolPtr(b bool) *bool {
    +	return &b
    +}
    +
    +// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
    +func Int(i *int) int {
    +	if i != nil {
    +		return *i
    +	}
    +	return 0
    +}
    +
    +// IntPtr returns a pointer to the passed int.
    +func IntPtr(i int) *int {
    +	return &i
    +}
    +
    +// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
    +func Int32(i *int32) int32 {
    +	if i != nil {
    +		return *i
    +	}
    +	return 0
    +}
    +
    +// Int32Ptr returns a pointer to the passed int32.
    +func Int32Ptr(i int32) *int32 {
    +	return &i
    +}
    +
    +// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
    +func Int64(i *int64) int64 {
    +	if i != nil {
    +		return *i
    +	}
    +	return 0
    +}
    +
    +// Int64Ptr returns a pointer to the passed int64.
    +func Int64Ptr(i int64) *int64 {
    +	return &i
    +}
    +
    +// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer is nil.
    +func Float32(i *float32) float32 {
    +	if i != nil {
    +		return *i
    +	}
    +	return 0.0
    +}
    +
    +// Float32Ptr returns a pointer to the passed float32.
    +func Float32Ptr(i float32) *float32 {
    +	return &i
    +}
    +
    +// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer is nil.
    +func Float64(i *float64) float64 {
    +	if i != nil {
    +		return *i
    +	}
    +	return 0.0
    +}
    +
    +// Float64Ptr returns a pointer to the passed float64.
    +func Float64Ptr(i float64) *float64 {
    +	return &i
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/utility.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/utility.go
    new file mode 100644
    index 0000000..7806714
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/utility.go
    @@ -0,0 +1,178 @@
    +package autorest
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"encoding/xml"
    +	"fmt"
    +	"io"
    +	"net/url"
    +	"reflect"
    +	"sort"
    +	"strings"
    +)
    +
    +// EncodedAs is a series of constants specifying various data encodings
    +type EncodedAs string
    +
    +const (
    +	// EncodedAsJSON states that data is encoded as JSON
    +	EncodedAsJSON EncodedAs = "JSON"
    +
    +	// EncodedAsXML states that data is encoded as XML
    +	EncodedAsXML EncodedAs = "XML"
    +)
    +
    +// Decoder defines the decoding method json.Decoder and xml.Decoder share
    +type Decoder interface {
    +	Decode(v interface{}) error
    +}
    +
    +// NewDecoder creates a new decoder appropriate to the passed encoding.
    +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
    +// encoded data.
    +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
    +	if encodedAs == EncodedAsJSON {
    +		return json.NewDecoder(r)
    +	} else if encodedAs == EncodedAsXML {
    +		return xml.NewDecoder(r)
    +	}
    +	return nil
    +}
    +
    +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
    +// is especially useful if there is a chance the data will fail to decode.
    +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
    +// is the decoding destination.
    +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
    +	b := bytes.Buffer{}
    +	return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
    +}
    +
    +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
    +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
    +// Further, when it is closed, it ensures that rc is closed as well.
    +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
    +	return &teeReadCloser{rc, io.TeeReader(rc, w)}
    +}
    +
    +type teeReadCloser struct {
    +	rc io.ReadCloser
    +	r  io.Reader
    +}
    +
    +func (t *teeReadCloser) Read(p []byte) (int, error) {
    +	return t.r.Read(p)
    +}
    +
    +func (t *teeReadCloser) Close() error {
    +	return t.rc.Close()
    +}
    +
    +func containsInt(ints []int, n int) bool {
    +	for _, i := range ints {
    +		if i == n {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +func escapeValueStrings(m map[string]string) map[string]string {
    +	for key, value := range m {
    +		m[key] = url.QueryEscape(value)
    +	}
    +	return m
    +}
    +
    +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
    +	mapOfStrings := make(map[string]string)
    +	for key, value := range mapOfInterface {
    +		mapOfStrings[key] = ensureValueString(value)
    +	}
    +	return mapOfStrings
    +}
    +
    +func ensureValueString(value interface{}) string {
    +	if value == nil {
    +		return ""
    +	}
    +	switch v := value.(type) {
    +	case string:
    +		return v
    +	case []byte:
    +		return string(v)
    +	default:
    +		return fmt.Sprintf("%v", v)
    +	}
    +}
    +
    +// MapToValues method converts map[string]interface{} to url.Values.
    +func MapToValues(m map[string]interface{}) url.Values {
    +	v := url.Values{}
    +	for key, value := range m {
    +		x := reflect.ValueOf(value)
    +		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
    +			for i := 0; i < x.Len(); i++ {
    +				v.Add(key, ensureValueString(x.Index(i)))
    +			}
    +		} else {
    +			v.Add(key, ensureValueString(value))
    +		}
    +	}
    +	return v
    +}
    +
    +// String method converts interface v to string. If interface is a list, it
    +// joins list elements using separator.
    +func String(v interface{}, sep ...string) string {
    +	if len(sep) > 0 {
    +		return ensureValueString(strings.Join(v.([]string), sep[0]))
    +	}
    +	return ensureValueString(v)
    +}
    +
    +// Encode method encodes url path and query parameters.
    +func Encode(location string, v interface{}, sep ...string) string {
    +	s := String(v, sep...)
    +	switch strings.ToLower(location) {
    +	case "path":
    +		return pathEscape(s)
    +	case "query":
    +		return queryEscape(s)
    +	default:
    +		return s
    +	}
    +}
    +
    +func pathEscape(s string) string {
    +	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
    +}
    +
    +func queryEscape(s string) string {
    +	return url.QueryEscape(s)
    +}
    +
    +// This method is the same as the Encode() method of the "net/url" Go package,
    +// except it does not encode the query parameters because they
    +// already come encoded. It formats values map in query format (bar=foo&a=b).
    +func createQuery(v url.Values) string {
    +	var buf bytes.Buffer
    +	keys := make([]string, 0, len(v))
    +	for k := range v {
    +		keys = append(keys, k)
    +	}
    +	sort.Strings(keys)
    +	for _, k := range keys {
    +		vs := v[k]
    +		prefix := url.QueryEscape(k) + "="
    +		for _, v := range vs {
    +			if buf.Len() > 0 {
    +				buf.WriteByte('&')
    +			}
    +			buf.WriteString(prefix)
    +			buf.WriteString(v)
    +		}
    +	}
    +	return buf.String()
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
    new file mode 100644
    index 0000000..d7b0ead
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
    @@ -0,0 +1,373 @@
    +/*
    +Package validation provides methods for validating parameter value using reflection.
    +*/
    +package validation
    +
    +import (
    +	"fmt"
    +	"reflect"
    +	"regexp"
    +	"strings"
    +)
    +
    +// Constraint stores constraint name, target field name
    +// Rule and chain validations.
    +type Constraint struct {
    +
    +	// Target field name for validation.
    +	Target string
    +
    +	// Constraint name e.g. minLength, MaxLength, Pattern, etc.
    +	Name string
    +
    +	// Rule for constraint e.g. greater than 10, less than 5 etc.
    +	Rule interface{}
    +
    +	// Chain Validations for struct type
    +	Chain []Constraint
    +}
    +
    +// Validation stores parameter-wise validation.
    +type Validation struct {
    +	TargetValue interface{}
    +	Constraints []Constraint
    +}
    +
    +// Constraint list
    +const (
    +	Empty            = "Empty"
    +	Null             = "Null"
    +	ReadOnly         = "ReadOnly"
    +	Pattern          = "Pattern"
    +	MaxLength        = "MaxLength"
    +	MinLength        = "MinLength"
    +	MaxItems         = "MaxItems"
    +	MinItems         = "MinItems"
    +	MultipleOf       = "MultipleOf"
    +	UniqueItems      = "UniqueItems"
    +	InclusiveMaximum = "InclusiveMaximum"
    +	ExclusiveMaximum = "ExclusiveMaximum"
    +	ExclusiveMinimum = "ExclusiveMinimum"
    +	InclusiveMinimum = "InclusiveMinimum"
    +)
    +
    +// Validate method validates constraints on parameter
    +// passed in validation array.
    +func Validate(m []Validation) error {
    +	for _, item := range m {
    +		v := reflect.ValueOf(item.TargetValue)
    +		for _, constraint := range item.Constraints {
    +			var err error
    +			switch v.Kind() {
    +			case reflect.Ptr:
    +				err = validatePtr(v, constraint)
    +			case reflect.String:
    +				err = validateString(v, constraint)
    +			case reflect.Struct:
    +				err = validateStruct(v, constraint)
    +			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +				err = validateInt(v, constraint)
    +			case reflect.Float32, reflect.Float64:
    +				err = validateFloat(v, constraint)
    +			case reflect.Array, reflect.Slice, reflect.Map:
    +				err = validateArrayMap(v, constraint)
    +			default:
    +				err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
    +			}
    +
    +			if err != nil {
    +				return err
    +			}
    +		}
    +	}
    +	return nil
    +}
    +
    +func validateStruct(x reflect.Value, v Constraint, name ...string) error {
    +	//Get field name from target name which is in format a.b.c
    +	s := strings.Split(v.Target, ".")
    +	f := x.FieldByName(s[len(s)-1])
    +	if isZero(f) {
    +		return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target))
    +	}
    +
    +	if err := Validate([]Validation{
    +		{
    +			TargetValue: getInterfaceValue(f),
    +			Constraints: []Constraint{v},
    +		},
    +	}); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +func validatePtr(x reflect.Value, v Constraint) error {
    +	if v.Name == ReadOnly {
    +		if !x.IsNil() {
    +			return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request")
    +		}
    +		return nil
    +	}
    +	if x.IsNil() {
    +		return checkNil(x, v)
    +	}
    +	if v.Chain != nil {
    +		return Validate([]Validation{
    +			{
    +				TargetValue: getInterfaceValue(x.Elem()),
    +				Constraints: v.Chain,
    +			},
    +		})
    +	}
    +	return nil
    +}
    +
    +func validateInt(x reflect.Value, v Constraint) error {
    +	i := x.Int()
    +	r, ok := v.Rule.(int)
    +	if !ok {
    +		return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
    +	}
    +	switch v.Name {
    +	case MultipleOf:
    +		if i%int64(r) != 0 {
    +			return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r))
    +		}
    +	case ExclusiveMinimum:
    +		if i <= int64(r) {
    +			return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
    +		}
    +	case ExclusiveMaximum:
    +		if i >= int64(r) {
    +			return createError(x, v, fmt.Sprintf("value must be less than %v", r))
    +		}
    +	case InclusiveMinimum:
    +		if i < int64(r) {
    +			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
    +		}
    +	case InclusiveMaximum:
    +		if i > int64(r) {
    +			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
    +		}
    +	default:
    +		return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name))
    +	}
    +	return nil
    +}
    +
    +func validateFloat(x reflect.Value, v Constraint) error {
    +	f := x.Float()
    +	r, ok := v.Rule.(float64)
    +	if !ok {
    +		return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule))
    +	}
    +	switch v.Name {
    +	case ExclusiveMinimum:
    +		if f <= r {
    +			return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
    +		}
    +	case ExclusiveMaximum:
    +		if f >= r {
    +			return createError(x, v, fmt.Sprintf("value must be less than %v", r))
    +		}
    +	case InclusiveMinimum:
    +		if f < r {
    +			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
    +		}
    +	case InclusiveMaximum:
    +		if f > r {
    +			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
    +		}
    +	default:
    +		return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name))
    +	}
    +	return nil
    +}
    +
    +func validateString(x reflect.Value, v Constraint) error {
    +	s := x.String()
    +	switch v.Name {
    +	case Empty:
    +		if len(s) == 0 {
    +			return checkEmpty(x, v)
    +		}
    +	case Pattern:
    +		reg, err := regexp.Compile(v.Rule.(string))
    +		if err != nil {
    +			return createError(x, v, err.Error())
    +		}
    +		if !reg.MatchString(s) {
    +			return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule))
    +		}
    +	case MaxLength:
    +		if _, ok := v.Rule.(int); !ok {
    +			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
    +		}
    +		if len(s) > v.Rule.(int) {
    +			return createError(x, v, fmt.Sprintf("value length must be less than %v", v.Rule))
    +		}
    +	case MinLength:
    +		if _, ok := v.Rule.(int); !ok {
    +			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule))
    +		}
    +		if len(s) < v.Rule.(int) {
    +			return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.Rule))
    +		}
    +	case ReadOnly:
    +		if len(s) > 0 {
    +			return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
    +		}
    +	default:
    +		return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name))
    +	}
    +
    +	if v.Chain != nil {
    +		return Validate([]Validation{
    +			{
    +				TargetValue: getInterfaceValue(x),
    +				Constraints: v.Chain,
    +			},
    +		})
    +	}
    +	return nil
    +}
    +
    +func validateArrayMap(x reflect.Value, v Constraint) error {
    +	switch v.Name {
    +	case Null:
    +		if x.IsNil() {
    +			return checkNil(x, v)
    +		}
    +	case Empty:
    +		if x.IsNil() || x.Len() == 0 {
    +			return checkEmpty(x, v)
    +		}
    +	case MaxItems:
    +		if _, ok := v.Rule.(int); !ok {
    +			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
    +		}
    +		if x.Len() > v.Rule.(int) {
    +			return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len()))
    +		}
    +	case MinItems:
    +		if _, ok := v.Rule.(int); !ok {
    +			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule))
    +		}
    +		if x.Len() < v.Rule.(int) {
    +			return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len()))
    +		}
    +	case UniqueItems:
    +		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
    +			if !checkForUniqueInArray(x) {
    +				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
    +			}
    +		} else if x.Kind() == reflect.Map {
    +			if !checkForUniqueInMap(x) {
    +				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x))
    +			}
    +		} else {
    +			return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind()))
    +		}
    +	case ReadOnly:
    +		if x.Len() != 0 {
    +			return createError(x, v, "readonly parameter; must send as nil or empty in request")
    +		}
    +	default:
    +		return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name))
    +	}
    +
    +	if v.Chain != nil {
    +		return Validate([]Validation{
    +			{
    +				TargetValue: getInterfaceValue(x),
    +				Constraints: v.Chain,
    +			},
    +		})
    +	}
    +	return nil
    +}
    +
    +func checkNil(x reflect.Value, v Constraint) error {
    +	if _, ok := v.Rule.(bool); !ok {
    +		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
    +	}
    +	if v.Rule.(bool) {
    +		return createError(x, v, "value can not be null; required parameter")
    +	}
    +	return nil
    +}
    +
    +func checkEmpty(x reflect.Value, v Constraint) error {
    +	if _, ok := v.Rule.(bool); !ok {
    +		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule))
    +	}
    +
    +	if v.Rule.(bool) {
    +		return createError(x, v, "value can not be null or empty; required parameter")
    +	}
    +	return nil
    +}
    +
    +func checkForUniqueInArray(x reflect.Value) bool {
    +	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
    +		return false
    +	}
    +	arrOfInterface := make([]interface{}, x.Len())
    +
    +	for i := 0; i < x.Len(); i++ {
    +		arrOfInterface[i] = x.Index(i).Interface()
    +	}
    +
    +	m := make(map[interface{}]bool)
    +	for _, val := range arrOfInterface {
    +		if m[val] {
    +			return false
    +		}
    +		m[val] = true
    +	}
    +	return true
    +}
    +
    +func checkForUniqueInMap(x reflect.Value) bool {
    +	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
    +		return false
    +	}
    +	mapOfInterface := make(map[interface{}]interface{}, x.Len())
    +
    +	keys := x.MapKeys()
    +	for _, k := range keys {
    +		mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
    +	}
    +
    +	m := make(map[interface{}]bool)
    +	for _, val := range mapOfInterface {
    +		if m[val] {
    +			return false
    +		}
    +		m[val] = true
    +	}
    +	return true
    +}
    +
    +func getInterfaceValue(x reflect.Value) interface{} {
    +	if x.Kind() == reflect.Invalid {
    +		return nil
    +	}
    +	return x.Interface()
    +}
    +
    +func isZero(x interface{}) bool {
    +	return x == reflect.Zero(reflect.TypeOf(x)).Interface()
    +}
    +
    +func createError(x reflect.Value, v Constraint, err string) error {
    +	return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s",
    +		v.Target, v.Name, getInterfaceValue(x), err)
    +}
    +
    +// NewErrorWithValidationError appends package type and method name in
    +// validation error.
    +func NewErrorWithValidationError(err error, packageType, method string) error {
    +	return fmt.Errorf("%s#%s: Invalid input: %v", packageType, method, err)
    +}
    diff --git a/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/version.go b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/version.go
    new file mode 100644
    index 0000000..8031a33
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/Azure/go-autorest/autorest/version.go
    @@ -0,0 +1,18 @@
    +package autorest
    +
    +import (
    +	"fmt"
    +)
    +
    +const (
    +	major        = "7"
    +	minor        = "0"
    +	patch        = "0"
    +	tag          = ""
    +	semVerFormat = "%s.%s.%s%s"
    +)
    +
    +// Version returns the semantic version (see http://semver.org).
    +func Version() string {
    +	return fmt.Sprintf(semVerFormat, major, minor, patch, tag)
    +}
    diff --git a/src/prometheus/vendor/github.com/PuerkitoBio/purell/LICENSE b/src/prometheus/vendor/github.com/PuerkitoBio/purell/LICENSE
    new file mode 100644
    index 0000000..4b9986d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/PuerkitoBio/purell/LICENSE
    @@ -0,0 +1,12 @@
    +Copyright (c) 2012, Martin Angers
    +All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
    +
    +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
    +
    +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
    +
    +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    diff --git a/src/prometheus/vendor/github.com/PuerkitoBio/purell/purell.go b/src/prometheus/vendor/github.com/PuerkitoBio/purell/purell.go
    new file mode 100644
    index 0000000..b79da64
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/PuerkitoBio/purell/purell.go
    @@ -0,0 +1,375 @@
    +/*
    +Package purell offers URL normalization as described on the wikipedia page:
    +http://en.wikipedia.org/wiki/URL_normalization
    +*/
    +package purell
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"net/url"
    +	"regexp"
    +	"sort"
    +	"strconv"
    +	"strings"
    +
    +	"github.com/PuerkitoBio/urlesc"
    +	"golang.org/x/net/idna"
    +	"golang.org/x/text/secure/precis"
    +	"golang.org/x/text/unicode/norm"
    +)
    +
    +// A set of normalization flags determines how a URL will
    +// be normalized.
    +type NormalizationFlags uint
    +
    +const (
    +	// Safe normalizations
    +	FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
    +	FlagLowercaseHost                                            // http://HOST -> http://host
    +	FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
    +	FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
    +	FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
    +	FlagRemoveDefaultPort                                        // http://host:80 -> http://host
    +	FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path
    +
    +	// Usually safe normalizations
    +	FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
    +	FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
    +	FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c
    +
    +	// Unsafe normalizations
    +	FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
    +	FlagRemoveFragment         // http://host/path#fragment -> http://host/path
    +	FlagForceHTTP              // https://host -> http://host
    +	FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
    +	FlagRemoveWWW              // http://www.host/ -> http://host/
    +	FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
    +	FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
    +
    +	// Normalizations not in the wikipedia article, required to cover tests cases
    +	// submitted by jehiah
    +	FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
    +	FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
    +	FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
    +	FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
    +	FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path
    +
    +	// Convenience set of safe normalizations
    +	FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
    +
    +	// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
    +	// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
    +
    +	// Convenience set of usually safe normalizations (includes FlagsSafe)
    +	FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
    +	FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
    +
    +	// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
    +	FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
    +	FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
    +
    +	// Convenience set of all available flags
    +	FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
    +	FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
    +)
    +
    +const (
    +	defaultHttpPort  = ":80"
    +	defaultHttpsPort = ":443"
    +)
    +
    +// Regular expressions used by the normalizations
    +var rxPort = regexp.MustCompile(`(:\d+)/?$`)
    +var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
    +var rxDupSlashes = regexp.MustCompile(`/{2,}`)
    +var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
    +var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
    +var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
    +var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
    +var rxEmptyPort = regexp.MustCompile(`:+$`)
    +
    +// Map of flags to implementation function.
    +// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
    +// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
    +
    +// Since maps have undefined traversing order, make a slice of ordered keys
    +var flagsOrder = []NormalizationFlags{
    +	FlagLowercaseScheme,
    +	FlagLowercaseHost,
    +	FlagRemoveDefaultPort,
    +	FlagRemoveDirectoryIndex,
    +	FlagRemoveDotSegments,
    +	FlagRemoveFragment,
    +	FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
    +	FlagRemoveDuplicateSlashes,
    +	FlagRemoveWWW,
    +	FlagAddWWW,
    +	FlagSortQuery,
    +	FlagDecodeDWORDHost,
    +	FlagDecodeOctalHost,
    +	FlagDecodeHexHost,
    +	FlagRemoveUnnecessaryHostDots,
    +	FlagRemoveEmptyPortSeparator,
    +	FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
    +	FlagAddTrailingSlash,
    +}
    +
    +// ... and then the map, where order is unimportant
    +var flags = map[NormalizationFlags]func(*url.URL){
    +	FlagLowercaseScheme:           lowercaseScheme,
    +	FlagLowercaseHost:             lowercaseHost,
    +	FlagRemoveDefaultPort:         removeDefaultPort,
    +	FlagRemoveDirectoryIndex:      removeDirectoryIndex,
    +	FlagRemoveDotSegments:         removeDotSegments,
    +	FlagRemoveFragment:            removeFragment,
    +	FlagForceHTTP:                 forceHTTP,
    +	FlagRemoveDuplicateSlashes:    removeDuplicateSlashes,
    +	FlagRemoveWWW:                 removeWWW,
    +	FlagAddWWW:                    addWWW,
    +	FlagSortQuery:                 sortQuery,
    +	FlagDecodeDWORDHost:           decodeDWORDHost,
    +	FlagDecodeOctalHost:           decodeOctalHost,
    +	FlagDecodeHexHost:             decodeHexHost,
    +	FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
    +	FlagRemoveEmptyPortSeparator:  removeEmptyPortSeparator,
    +	FlagRemoveTrailingSlash:       removeTrailingSlash,
    +	FlagAddTrailingSlash:          addTrailingSlash,
    +}
    +
    +// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
    +// It takes an URL string as input, as well as the normalization flags.
    +func MustNormalizeURLString(u string, f NormalizationFlags) string {
    +	result, e := NormalizeURLString(u, f)
    +	if e != nil {
    +		panic(e)
    +	}
    +	return result
    +}
    +
    +// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
    +// It takes an URL string as input, as well as the normalization flags.
    +func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
    +	if parsed, e := url.Parse(u); e != nil {
    +		return "", e
    +	} else {
    +		options := make([]precis.Option, 1, 3)
    +		options[0] = precis.IgnoreCase
    +		if f&FlagLowercaseHost == FlagLowercaseHost {
    +			options = append(options, precis.FoldCase())
    +		}
    +		options = append(options, precis.Norm(norm.NFC))
    +		profile := precis.NewFreeform(options...)
    +		if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
    +			return "", e
    +		}
    +		return NormalizeURL(parsed, f), nil
    +	}
    +	panic("Unreachable code.")
    +}
    +
    +// NormalizeURL returns the normalized string.
    +// It takes a parsed URL object as input, as well as the normalization flags.
    +func NormalizeURL(u *url.URL, f NormalizationFlags) string {
    +	for _, k := range flagsOrder {
    +		if f&k == k {
    +			flags[k](u)
    +		}
    +	}
    +	return urlesc.Escape(u)
    +}
    +
    +func lowercaseScheme(u *url.URL) {
    +	if len(u.Scheme) > 0 {
    +		u.Scheme = strings.ToLower(u.Scheme)
    +	}
    +}
    +
    +func lowercaseHost(u *url.URL) {
    +	if len(u.Host) > 0 {
    +		u.Host = strings.ToLower(u.Host)
    +	}
    +}
    +
    +func removeDefaultPort(u *url.URL) {
    +	if len(u.Host) > 0 {
    +		scheme := strings.ToLower(u.Scheme)
    +		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
    +			if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
    +				return ""
    +			}
    +			return val
    +		})
    +	}
    +}
    +
    +func removeTrailingSlash(u *url.URL) {
    +	if l := len(u.Path); l > 0 {
    +		if strings.HasSuffix(u.Path, "/") {
    +			u.Path = u.Path[:l-1]
    +		}
    +	} else if l = len(u.Host); l > 0 {
    +		if strings.HasSuffix(u.Host, "/") {
    +			u.Host = u.Host[:l-1]
    +		}
    +	}
    +}
    +
    +func addTrailingSlash(u *url.URL) {
    +	if l := len(u.Path); l > 0 {
    +		if !strings.HasSuffix(u.Path, "/") {
    +			u.Path += "/"
    +		}
    +	} else if l = len(u.Host); l > 0 {
    +		if !strings.HasSuffix(u.Host, "/") {
    +			u.Host += "/"
    +		}
    +	}
    +}
    +
    +func removeDotSegments(u *url.URL) {
    +	if len(u.Path) > 0 {
    +		var dotFree []string
    +		var lastIsDot bool
    +
    +		sections := strings.Split(u.Path, "/")
    +		for _, s := range sections {
    +			if s == ".." {
    +				if len(dotFree) > 0 {
    +					dotFree = dotFree[:len(dotFree)-1]
    +				}
    +			} else if s != "." {
    +				dotFree = append(dotFree, s)
    +			}
    +			lastIsDot = (s == "." || s == "..")
    +		}
    +		// Special case if host does not end with / and new path does not begin with /
    +		u.Path = strings.Join(dotFree, "/")
    +		if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
    +			u.Path = "/" + u.Path
    +		}
    +		// Special case if the last segment was a dot, make sure the path ends with a slash
    +		if lastIsDot && !strings.HasSuffix(u.Path, "/") {
    +			u.Path += "/"
    +		}
    +	}
    +}
    +
    +func removeDirectoryIndex(u *url.URL) {
    +	if len(u.Path) > 0 {
    +		u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
    +	}
    +}
    +
    +func removeFragment(u *url.URL) {
    +	u.Fragment = ""
    +}
    +
    +func forceHTTP(u *url.URL) {
    +	if strings.ToLower(u.Scheme) == "https" {
    +		u.Scheme = "http"
    +	}
    +}
    +
    +func removeDuplicateSlashes(u *url.URL) {
    +	if len(u.Path) > 0 {
    +		u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
    +	}
    +}
    +
    +func removeWWW(u *url.URL) {
    +	if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
    +		u.Host = u.Host[4:]
    +	}
    +}
    +
    +func addWWW(u *url.URL) {
    +	if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
    +		u.Host = "www." + u.Host
    +	}
    +}
    +
    +func sortQuery(u *url.URL) {
    +	q := u.Query()
    +
    +	if len(q) > 0 {
    +		arKeys := make([]string, len(q))
    +		i := 0
    +		for k, _ := range q {
    +			arKeys[i] = k
    +			i++
    +		}
    +		sort.Strings(arKeys)
    +		buf := new(bytes.Buffer)
    +		for _, k := range arKeys {
    +			sort.Strings(q[k])
    +			for _, v := range q[k] {
    +				if buf.Len() > 0 {
    +					buf.WriteRune('&')
    +				}
    +				buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
    +			}
    +		}
    +
    +		// Rebuild the raw query string
    +		u.RawQuery = buf.String()
    +	}
    +}
    +
    +func decodeDWORDHost(u *url.URL) {
    +	if len(u.Host) > 0 {
    +		if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
    +			var parts [4]int64
    +
    +			dword, _ := strconv.ParseInt(matches[1], 10, 0)
    +			for i, shift := range []uint{24, 16, 8, 0} {
    +				parts[i] = dword >> shift & 0xFF
    +			}
    +			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
    +		}
    +	}
    +}
    +
    +func decodeOctalHost(u *url.URL) {
    +	if len(u.Host) > 0 {
    +		if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
    +			var parts [4]int64
    +
    +			for i := 1; i <= 4; i++ {
    +				parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
    +			}
    +			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
    +		}
    +	}
    +}
    +
    +func decodeHexHost(u *url.URL) {
    +	if len(u.Host) > 0 {
    +		if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
    +			// Conversion is safe because of regex validation
    +			parsed, _ := strconv.ParseInt(matches[1], 16, 0)
    +			// Set host as DWORD (base 10) encoded host
    +			u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
    +			// The rest is the same as decoding a DWORD host
    +			decodeDWORDHost(u)
    +		}
    +	}
    +}
    +
    +func removeUnncessaryHostDots(u *url.URL) {
    +	if len(u.Host) > 0 {
    +		if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
    +			// Trim the leading and trailing dots
    +			u.Host = strings.Trim(matches[1], ".")
    +			if len(matches) > 2 {
    +				u.Host += matches[2]
    +			}
    +		}
    +	}
    +}
    +
    +func removeEmptyPortSeparator(u *url.URL) {
    +	if len(u.Host) > 0 {
    +		u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/src/prometheus/vendor/github.com/PuerkitoBio/urlesc/LICENSE
    new file mode 100644
    index 0000000..7448756
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/PuerkitoBio/urlesc/LICENSE
    @@ -0,0 +1,27 @@
    +Copyright (c) 2012 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    diff --git a/src/prometheus/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/src/prometheus/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
    new file mode 100644
    index 0000000..1b84624
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
    @@ -0,0 +1,180 @@
    +// Copyright 2009 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package urlesc implements query escaping as per RFC 3986.
    +// It contains some parts of the net/url package, modified so as to allow
    +// some reserved characters incorrectly escaped by net/url.
    +// See https://github.com/golang/go/issues/5684
    +package urlesc
    +
    +import (
    +	"bytes"
    +	"net/url"
    +	"strings"
    +)
    +
    +type encoding int
    +
    +const (
    +	encodePath encoding = 1 + iota
    +	encodeUserPassword
    +	encodeQueryComponent
    +	encodeFragment
    +)
    +
    +// Return true if the specified character should be escaped when
    +// appearing in a URL string, according to RFC 3986.
    +func shouldEscape(c byte, mode encoding) bool {
    +	// §2.3 Unreserved characters (alphanum)
    +	if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
    +		return false
    +	}
    +
    +	switch c {
    +	case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
    +		return false
    +
    +	// §2.2 Reserved characters (reserved)
    +	case ':', '/', '?', '#', '[', ']', '@', // gen-delims
    +		'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
    +		// Different sections of the URL allow a few of
    +		// the reserved characters to appear unescaped.
    +		switch mode {
    +		case encodePath: // §3.3
    +			// The RFC allows sub-delims and : @.
    +			// '/', '[' and ']' can be used to assign meaning to individual path
    +			// segments.  This package only manipulates the path as a whole,
    +			// so we allow those as well.  That leaves only ? and # to escape.
    +			return c == '?' || c == '#'
    +
    +		case encodeUserPassword: // §3.2.1
    +			// The RFC allows : and sub-delims in
    +			// userinfo.  The parsing of userinfo treats ':' as special so we must escape
    +			// all the gen-delims.
    +			return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
    +
    +		case encodeQueryComponent: // §3.4
    +			// The RFC allows / and ?.
    +			return c != '/' && c != '?'
    +
    +		case encodeFragment: // §4.1
    +			// The RFC text is silent but the grammar allows
    +			// everything, so escape nothing but #
    +			return c == '#'
    +		}
    +	}
    +
    +	// Everything else must be escaped.
    +	return true
    +}
    +
    +// QueryEscape escapes the string so it can be safely placed
    +// inside a URL query.
    +func QueryEscape(s string) string {
    +	return escape(s, encodeQueryComponent)
    +}
    +
    +func escape(s string, mode encoding) string {
    +	spaceCount, hexCount := 0, 0
    +	for i := 0; i < len(s); i++ {
    +		c := s[i]
    +		if shouldEscape(c, mode) {
    +			if c == ' ' && mode == encodeQueryComponent {
    +				spaceCount++
    +			} else {
    +				hexCount++
    +			}
    +		}
    +	}
    +
    +	if spaceCount == 0 && hexCount == 0 {
    +		return s
    +	}
    +
    +	t := make([]byte, len(s)+2*hexCount)
    +	j := 0
    +	for i := 0; i < len(s); i++ {
    +		switch c := s[i]; {
    +		case c == ' ' && mode == encodeQueryComponent:
    +			t[j] = '+'
    +			j++
    +		case shouldEscape(c, mode):
    +			t[j] = '%'
    +			t[j+1] = "0123456789ABCDEF"[c>>4]
    +			t[j+2] = "0123456789ABCDEF"[c&15]
    +			j += 3
    +		default:
    +			t[j] = s[i]
    +			j++
    +		}
    +	}
    +	return string(t)
    +}
    +
    +var uiReplacer = strings.NewReplacer(
    +	"%21", "!",
    +	"%27", "'",
    +	"%28", "(",
    +	"%29", ")",
    +	"%2A", "*",
    +)
    +
    +// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
    +func unescapeUserinfo(s string) string {
    +	return uiReplacer.Replace(s)
    +}
    +
    +// Escape reassembles the URL into a valid URL string.
    +// The general form of the result is one of:
    +//
    +//	scheme:opaque
    +//	scheme://userinfo@host/path?query#fragment
    +//
    +// If u.Opaque is non-empty, String uses the first form;
    +// otherwise it uses the second form.
    +//
    +// In the second form, the following rules apply:
    +//	- if u.Scheme is empty, scheme: is omitted.
    +//	- if u.User is nil, userinfo@ is omitted.
    +//	- if u.Host is empty, host/ is omitted.
    +//	- if u.Scheme and u.Host are empty and u.User is nil,
    +//	   the entire scheme://userinfo@host/ is omitted.
    +//	- if u.Host is non-empty and u.Path begins with a /,
    +//	   the form host/path does not add its own /.
    +//	- if u.RawQuery is empty, ?query is omitted.
    +//	- if u.Fragment is empty, #fragment is omitted.
    +func Escape(u *url.URL) string {
    +	var buf bytes.Buffer
    +	if u.Scheme != "" {
    +		buf.WriteString(u.Scheme)
    +		buf.WriteByte(':')
    +	}
    +	if u.Opaque != "" {
    +		buf.WriteString(u.Opaque)
    +	} else {
    +		if u.Scheme != "" || u.Host != "" || u.User != nil {
    +			buf.WriteString("//")
    +			if ui := u.User; ui != nil {
    +				buf.WriteString(unescapeUserinfo(ui.String()))
    +				buf.WriteByte('@')
    +			}
    +			if h := u.Host; h != "" {
    +				buf.WriteString(h)
    +			}
    +		}
    +		if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
    +			buf.WriteByte('/')
    +		}
    +		buf.WriteString(escape(u.Path, encodePath))
    +	}
    +	if u.RawQuery != "" {
    +		buf.WriteByte('?')
    +		buf.WriteString(u.RawQuery)
    +	}
    +	if u.Fragment != "" {
    +		buf.WriteByte('#')
    +		buf.WriteString(escape(u.Fragment, encodeFragment))
    +	}
    +	return buf.String()
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/LICENSE b/src/prometheus/vendor/github.com/alecthomas/template/LICENSE
    new file mode 100644
    index 0000000..7448756
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/LICENSE
    @@ -0,0 +1,27 @@
    +Copyright (c) 2012 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/README.md b/src/prometheus/vendor/github.com/alecthomas/template/README.md
    new file mode 100644
    index 0000000..ef6a8ee
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/README.md
    @@ -0,0 +1,25 @@
    +# Go's `text/template` package with newline elision
    +
    +This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
    +
    +eg.
    +
    +```
    +{{if true}}\
    +hello
    +{{end}}\
    +```
    +
    +Will result in:
    +
    +```
    +hello\n
    +```
    +
    +Rather than:
    +
    +```
    +\n
    +hello\n
    +\n
    +```
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/doc.go b/src/prometheus/vendor/github.com/alecthomas/template/doc.go
    new file mode 100644
    index 0000000..223c595
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/doc.go
    @@ -0,0 +1,406 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +/*
    +Package template implements data-driven templates for generating textual output.
    +
    +To generate HTML output, see package html/template, which has the same interface
    +as this package but automatically secures HTML output against certain attacks.
    +
    +Templates are executed by applying them to a data structure. Annotations in the
    +template refer to elements of the data structure (typically a field of a struct
    +or a key in a map) to control execution and derive values to be displayed.
    +Execution of the template walks the structure and sets the cursor, represented
    +by a period '.' and called "dot", to the value at the current location in the
    +structure as execution proceeds.
    +
    +The input text for a template is UTF-8-encoded text in any format.
    +"Actions"--data evaluations or control structures--are delimited by
    +"{{" and "}}"; all text outside actions is copied to the output unchanged.
    +Actions may not span newlines, although comments can.
    +
    +Once parsed, a template may be executed safely in parallel.
    +
    +Here is a trivial example that prints "17 items are made of wool".
    +
    +	type Inventory struct {
    +		Material string
    +		Count    uint
    +	}
    +	sweaters := Inventory{"wool", 17}
    +	tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
    +	if err != nil { panic(err) }
    +	err = tmpl.Execute(os.Stdout, sweaters)
    +	if err != nil { panic(err) }
    +
    +More intricate examples appear below.
    +
    +Actions
    +
    +Here is the list of actions. "Arguments" and "pipelines" are evaluations of
    +data, defined in detail below.
    +
    +*/
    +//	{{/* a comment */}}
    +//		A comment; discarded. May contain newlines.
    +//		Comments do not nest and must start and end at the
    +//		delimiters, as shown here.
    +/*
    +
    +	{{pipeline}}
    +		The default textual representation of the value of the pipeline
    +		is copied to the output.
    +
    +	{{if pipeline}} T1 {{end}}
    +		If the value of the pipeline is empty, no output is generated;
    +		otherwise, T1 is executed.  The empty values are false, 0, any
    +		nil pointer or interface value, and any array, slice, map, or
    +		string of length zero.
    +		Dot is unaffected.
    +
    +	{{if pipeline}} T1 {{else}} T0 {{end}}
    +		If the value of the pipeline is empty, T0 is executed;
    +		otherwise, T1 is executed.  Dot is unaffected.
    +
    +	{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
    +		To simplify the appearance of if-else chains, the else action
    +		of an if may include another if directly; the effect is exactly
    +		the same as writing
    +			{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
    +
    +	{{range pipeline}} T1 {{end}}
    +		The value of the pipeline must be an array, slice, map, or channel.
    +		If the value of the pipeline has length zero, nothing is output;
    +		otherwise, dot is set to the successive elements of the array,
    +		slice, or map and T1 is executed. If the value is a map and the
    +		keys are of basic type with a defined order ("comparable"), the
    +		elements will be visited in sorted key order.
    +
    +	{{range pipeline}} T1 {{else}} T0 {{end}}
    +		The value of the pipeline must be an array, slice, map, or channel.
    +		If the value of the pipeline has length zero, dot is unaffected and
    +		T0 is executed; otherwise, dot is set to the successive elements
    +		of the array, slice, or map and T1 is executed.
    +
    +	{{template "name"}}
    +		The template with the specified name is executed with nil data.
    +
    +	{{template "name" pipeline}}
    +		The template with the specified name is executed with dot set
    +		to the value of the pipeline.
    +
    +	{{with pipeline}} T1 {{end}}
    +		If the value of the pipeline is empty, no output is generated;
    +		otherwise, dot is set to the value of the pipeline and T1 is
    +		executed.
    +
    +	{{with pipeline}} T1 {{else}} T0 {{end}}
    +		If the value of the pipeline is empty, dot is unaffected and T0
    +		is executed; otherwise, dot is set to the value of the pipeline
    +		and T1 is executed.
    +
    +Arguments
    +
    +An argument is a simple value, denoted by one of the following.
    +
    +	- A boolean, string, character, integer, floating-point, imaginary
    +	  or complex constant in Go syntax. These behave like Go's untyped
    +	  constants, although raw strings may not span newlines.
    +	- The keyword nil, representing an untyped Go nil.
    +	- The character '.' (period):
    +		.
    +	  The result is the value of dot.
    +	- A variable name, which is a (possibly empty) alphanumeric string
    +	  preceded by a dollar sign, such as
    +		$piOver2
    +	  or
    +		$
    +	  The result is the value of the variable.
    +	  Variables are described below.
    +	- The name of a field of the data, which must be a struct, preceded
    +	  by a period, such as
    +		.Field
    +	  The result is the value of the field. Field invocations may be
    +	  chained:
    +	    .Field1.Field2
    +	  Fields can also be evaluated on variables, including chaining:
    +	    $x.Field1.Field2
    +	- The name of a key of the data, which must be a map, preceded
    +	  by a period, such as
    +		.Key
    +	  The result is the map element value indexed by the key.
    +	  Key invocations may be chained and combined with fields to any
    +	  depth:
    +	    .Field1.Key1.Field2.Key2
     +	  Although the key must be an alphanumeric identifier, unlike with
     +	  field names it does not need to start with an upper case letter.
    +	  Keys can also be evaluated on variables, including chaining:
    +	    $x.key1.key2
    +	- The name of a niladic method of the data, preceded by a period,
    +	  such as
    +		.Method
    +	  The result is the value of invoking the method with dot as the
    +	  receiver, dot.Method(). Such a method must have one return value (of
    +	  any type) or two return values, the second of which is an error.
    +	  If it has two and the returned error is non-nil, execution terminates
    +	  and an error is returned to the caller as the value of Execute.
    +	  Method invocations may be chained and combined with fields and keys
    +	  to any depth:
    +	    .Field1.Key1.Method1.Field2.Key2.Method2
    +	  Methods can also be evaluated on variables, including chaining:
    +	    $x.Method1.Field
    +	- The name of a niladic function, such as
    +		fun
    +	  The result is the value of invoking the function, fun(). The return
    +	  types and values behave as in methods. Functions and function
    +	  names are described below.
     +	- A parenthesized instance of one of the above, for grouping. The result
    +	  may be accessed by a field or map key invocation.
    +		print (.F1 arg1) (.F2 arg2)
    +		(.StructValuedMethod "arg").Field
    +
    +Arguments may evaluate to any type; if they are pointers the implementation
    +automatically indirects to the base type when required.
    +If an evaluation yields a function value, such as a function-valued
    +field of a struct, the function is not invoked automatically, but it
    +can be used as a truth value for an if action and the like. To invoke
    +it, use the call function, defined below.
    +
    +A pipeline is a possibly chained sequence of "commands". A command is a simple
    +value (argument) or a function or method call, possibly with multiple arguments:
    +
    +	Argument
    +		The result is the value of evaluating the argument.
    +	.Method [Argument...]
    +		The method can be alone or the last element of a chain but,
    +		unlike methods in the middle of a chain, it can take arguments.
    +		The result is the value of calling the method with the
    +		arguments:
    +			dot.Method(Argument1, etc.)
    +	functionName [Argument...]
    +		The result is the value of calling the function associated
    +		with the name:
    +			function(Argument1, etc.)
    +		Functions and function names are described below.
    +
    +Pipelines
    +
    +A pipeline may be "chained" by separating a sequence of commands with pipeline
     +characters '|'. In a chained pipeline, the result of each command is
    +passed as the last argument of the following command. The output of the final
    +command in the pipeline is the value of the pipeline.
    +
    +The output of a command will be either one value or two values, the second of
    +which has type error. If that second value is present and evaluates to
    +non-nil, execution terminates and the error is returned to the caller of
    +Execute.
    +
    +Variables
    +
    +A pipeline inside an action may initialize a variable to capture the result.
    +The initialization has syntax
    +
    +	$variable := pipeline
    +
    +where $variable is the name of the variable. An action that declares a
    +variable produces no output.
    +
    +If a "range" action initializes a variable, the variable is set to the
    +successive elements of the iteration.  Also, a "range" may declare two
    +variables, separated by a comma:
    +
    +	range $index, $element := pipeline
    +
    +in which case $index and $element are set to the successive values of the
    +array/slice index or map key and element, respectively.  Note that if there is
    +only one variable, it is assigned the element; this is opposite to the
    +convention in Go range clauses.
    +
    +A variable's scope extends to the "end" action of the control structure ("if",
    +"with", or "range") in which it is declared, or to the end of the template if
    +there is no such control structure.  A template invocation does not inherit
    +variables from the point of its invocation.
    +
    +When execution begins, $ is set to the data argument passed to Execute, that is,
    +to the starting value of dot.
    +
    +Examples
    +
    +Here are some example one-line templates demonstrating pipelines and variables.
    +All produce the quoted word "output":
    +
    +	{{"\"output\""}}
    +		A string constant.
    +	{{`"output"`}}
    +		A raw string constant.
    +	{{printf "%q" "output"}}
    +		A function call.
    +	{{"output" | printf "%q"}}
    +		A function call whose final argument comes from the previous
    +		command.
    +	{{printf "%q" (print "out" "put")}}
    +		A parenthesized argument.
    +	{{"put" | printf "%s%s" "out" | printf "%q"}}
    +		A more elaborate call.
    +	{{"output" | printf "%s" | printf "%q"}}
    +		A longer chain.
    +	{{with "output"}}{{printf "%q" .}}{{end}}
    +		A with action using dot.
    +	{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
    +		A with action that creates and uses a variable.
    +	{{with $x := "output"}}{{printf "%q" $x}}{{end}}
    +		A with action that uses the variable in another action.
    +	{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
    +		The same, but pipelined.
    +
    +Functions
    +
    +During execution functions are found in two function maps: first in the
    +template, then in the global function map. By default, no functions are defined
    +in the template but the Funcs method can be used to add them.
    +
    +Predefined global functions are named as follows.
    +
    +	and
    +		Returns the boolean AND of its arguments by returning the
    +		first empty argument or the last argument, that is,
    +		"and x y" behaves as "if x then y else x". All the
    +		arguments are evaluated.
    +	call
    +		Returns the result of calling the first argument, which
    +		must be a function, with the remaining arguments as parameters.
    +		Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
    +		Y is a func-valued field, map entry, or the like.
    +		The first argument must be the result of an evaluation
    +		that yields a value of function type (as distinct from
    +		a predefined function such as print). The function must
    +		return either one or two result values, the second of which
    +		is of type error. If the arguments don't match the function
    +		or the returned error value is non-nil, execution stops.
    +	html
    +		Returns the escaped HTML equivalent of the textual
    +		representation of its arguments.
    +	index
    +		Returns the result of indexing its first argument by the
    +		following arguments. Thus "index x 1 2 3" is, in Go syntax,
    +		x[1][2][3]. Each indexed item must be a map, slice, or array.
    +	js
    +		Returns the escaped JavaScript equivalent of the textual
    +		representation of its arguments.
    +	len
    +		Returns the integer length of its argument.
    +	not
    +		Returns the boolean negation of its single argument.
    +	or
    +		Returns the boolean OR of its arguments by returning the
    +		first non-empty argument or the last argument, that is,
    +		"or x y" behaves as "if x then x else y". All the
    +		arguments are evaluated.
    +	print
    +		An alias for fmt.Sprint
    +	printf
    +		An alias for fmt.Sprintf
    +	println
    +		An alias for fmt.Sprintln
    +	urlquery
    +		Returns the escaped value of the textual representation of
    +		its arguments in a form suitable for embedding in a URL query.
    +
    +The boolean functions take any zero value to be false and a non-zero
    +value to be true.
    +
    +There is also a set of binary comparison operators defined as
    +functions:
    +
    +	eq
    +		Returns the boolean truth of arg1 == arg2
    +	ne
    +		Returns the boolean truth of arg1 != arg2
    +	lt
    +		Returns the boolean truth of arg1 < arg2
    +	le
    +		Returns the boolean truth of arg1 <= arg2
    +	gt
    +		Returns the boolean truth of arg1 > arg2
    +	ge
    +		Returns the boolean truth of arg1 >= arg2
    +
    +For simpler multi-way equality tests, eq (only) accepts two or more
    +arguments and compares the second and subsequent to the first,
    +returning in effect
    +
    +	arg1==arg2 || arg1==arg3 || arg1==arg4 ...
    +
    +(Unlike with || in Go, however, eq is a function call and all the
    +arguments will be evaluated.)
    +
    +The comparison functions work on basic types only (or named basic
    +types, such as "type Celsius float32"). They implement the Go rules
    +for comparison of values, except that size and exact type are
    +ignored, so any integer value, signed or unsigned, may be compared
    +with any other integer value. (The arithmetic value is compared,
    +not the bit pattern, so all negative integers are less than all
    +unsigned integers.) However, as usual, one may not compare an int
    +with a float32 and so on.
    +
    +Associated templates
    +
    +Each template is named by a string specified when it is created. Also, each
    +template is associated with zero or more other templates that it may invoke by
    +name; such associations are transitive and form a name space of templates.
    +
    +A template may use a template invocation to instantiate another associated
    +template; see the explanation of the "template" action above. The name must be
    +that of a template associated with the template that contains the invocation.
    +
    +Nested template definitions
    +
    +When parsing a template, another template may be defined and associated with the
    +template being parsed. Template definitions must appear at the top level of the
    +template, much like global variables in a Go program.
    +
    +The syntax of such definitions is to surround each template declaration with a
    +"define" and "end" action.
    +
    +The define action names the template being created by providing a string
    +constant. Here is a simple example:
    +
    +	`{{define "T1"}}ONE{{end}}
    +	{{define "T2"}}TWO{{end}}
    +	{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
    +	{{template "T3"}}`
    +
    +This defines two templates, T1 and T2, and a third T3 that invokes the other two
    +when it is executed. Finally it invokes T3. If executed this template will
    +produce the text
    +
    +	ONE TWO
    +
    +By construction, a template may reside in only one association. If it's
    +necessary to have a template addressable from multiple associations, the
    +template definition must be parsed multiple times to create distinct *Template
    +values, or must be copied with the Clone or AddParseTree method.
    +
    +Parse may be called multiple times to assemble the various associated templates;
    +see the ParseFiles and ParseGlob functions and methods for simple ways to parse
    +related templates stored in files.
    +
    +A template may be executed directly or through ExecuteTemplate, which executes
    +an associated template identified by name. To invoke our example above, we
    +might write,
    +
    +	err := tmpl.Execute(os.Stdout, "no data needed")
    +	if err != nil {
    +		log.Fatalf("execution failed: %s", err)
    +	}
    +
    +or to invoke a particular template explicitly by name,
    +
    +	err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
    +	if err != nil {
    +		log.Fatalf("execution failed: %s", err)
    +	}
    +
    +*/
    +package template
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/exec.go b/src/prometheus/vendor/github.com/alecthomas/template/exec.go
    new file mode 100644
    index 0000000..c3078e5
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/exec.go
    @@ -0,0 +1,845 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package template
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"reflect"
    +	"runtime"
    +	"sort"
    +	"strings"
    +
    +	"github.com/alecthomas/template/parse"
    +)
    +
    +// state represents the state of an execution. It's not part of the
    +// template so that multiple executions of the same template
    +// can execute in parallel.
    +type state struct {
    +	tmpl *Template
    +	wr   io.Writer
    +	node parse.Node // current node, for errors
    +	vars []variable // push-down stack of variable values.
    +}
    +
    +// variable holds the dynamic value of a variable such as $, $x etc.
    +type variable struct {
    +	name  string
    +	value reflect.Value
    +}
    +
    +// push pushes a new variable on the stack.
    +func (s *state) push(name string, value reflect.Value) {
    +	s.vars = append(s.vars, variable{name, value})
    +}
    +
    +// mark returns the length of the variable stack.
    +func (s *state) mark() int {
    +	return len(s.vars)
    +}
    +
    +// pop pops the variable stack up to the mark.
    +func (s *state) pop(mark int) {
    +	s.vars = s.vars[0:mark]
    +}
    +
    +// setVar overwrites the top-nth variable on the stack. Used by range iterations.
    +func (s *state) setVar(n int, value reflect.Value) {
    +	s.vars[len(s.vars)-n].value = value
    +}
    +
    +// varValue returns the value of the named variable.
    +func (s *state) varValue(name string) reflect.Value {
    +	for i := s.mark() - 1; i >= 0; i-- {
    +		if s.vars[i].name == name {
    +			return s.vars[i].value
    +		}
    +	}
    +	s.errorf("undefined variable: %s", name)
    +	return zero
    +}
    +
    +var zero reflect.Value
    +
    +// at marks the state to be on node n, for error reporting.
    +func (s *state) at(node parse.Node) {
    +	s.node = node
    +}
    +
    +// doublePercent returns the string with %'s replaced by %%, if necessary,
    +// so it can be used safely inside a Printf format string.
    +func doublePercent(str string) string {
    +	if strings.Contains(str, "%") {
    +		str = strings.Replace(str, "%", "%%", -1)
    +	}
    +	return str
    +}
    +
    +// errorf formats the error and terminates processing.
    +func (s *state) errorf(format string, args ...interface{}) {
    +	name := doublePercent(s.tmpl.Name())
    +	if s.node == nil {
    +		format = fmt.Sprintf("template: %s: %s", name, format)
    +	} else {
    +		location, context := s.tmpl.ErrorContext(s.node)
    +		format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
    +	}
    +	panic(fmt.Errorf(format, args...))
    +}
    +
    +// errRecover is the handler that turns panics into returns from the top
    +// level of Parse.
    +func errRecover(errp *error) {
    +	e := recover()
    +	if e != nil {
    +		switch err := e.(type) {
    +		case runtime.Error:
    +			panic(e)
    +		case error:
    +			*errp = err
    +		default:
    +			panic(e)
    +		}
    +	}
    +}
    +
    +// ExecuteTemplate applies the template associated with t that has the given name
    +// to the specified data object and writes the output to wr.
    +// If an error occurs executing the template or writing its output,
    +// execution stops, but partial results may already have been written to
    +// the output writer.
    +// A template may be executed safely in parallel.
    +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
    +	tmpl := t.tmpl[name]
    +	if tmpl == nil {
    +		return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
    +	}
    +	return tmpl.Execute(wr, data)
    +}
    +
    +// Execute applies a parsed template to the specified data object,
    +// and writes the output to wr.
    +// If an error occurs executing the template or writing its output,
    +// execution stops, but partial results may already have been written to
    +// the output writer.
    +// A template may be executed safely in parallel.
    +func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
    +	defer errRecover(&err)
    +	value := reflect.ValueOf(data)
    +	state := &state{
    +		tmpl: t,
    +		wr:   wr,
    +		vars: []variable{{"$", value}},
    +	}
    +	t.init()
    +	if t.Tree == nil || t.Root == nil {
    +		var b bytes.Buffer
    +		for name, tmpl := range t.tmpl {
    +			if tmpl.Tree == nil || tmpl.Root == nil {
    +				continue
    +			}
    +			if b.Len() > 0 {
    +				b.WriteString(", ")
    +			}
    +			fmt.Fprintf(&b, "%q", name)
    +		}
    +		var s string
    +		if b.Len() > 0 {
    +			s = "; defined templates are: " + b.String()
    +		}
    +		state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
    +	}
    +	state.walk(value, t.Root)
    +	return
    +}
    +
    +// Walk functions step through the major pieces of the template structure,
    +// generating output as they go.
    +func (s *state) walk(dot reflect.Value, node parse.Node) {
    +	s.at(node)
    +	switch node := node.(type) {
    +	case *parse.ActionNode:
    +		// Do not pop variables so they persist until next end.
    +		// Also, if the action declares variables, don't print the result.
    +		val := s.evalPipeline(dot, node.Pipe)
    +		if len(node.Pipe.Decl) == 0 {
    +			s.printValue(node, val)
    +		}
    +	case *parse.IfNode:
    +		s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
    +	case *parse.ListNode:
    +		for _, node := range node.Nodes {
    +			s.walk(dot, node)
    +		}
    +	case *parse.RangeNode:
    +		s.walkRange(dot, node)
    +	case *parse.TemplateNode:
    +		s.walkTemplate(dot, node)
    +	case *parse.TextNode:
    +		if _, err := s.wr.Write(node.Text); err != nil {
    +			s.errorf("%s", err)
    +		}
    +	case *parse.WithNode:
    +		s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
    +	default:
    +		s.errorf("unknown node: %s", node)
    +	}
    +}
    +
    +// walkIfOrWith walks an 'if' or 'with' node. The two control structures
    +// are identical in behavior except that 'with' sets dot.
    +func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
    +	defer s.pop(s.mark())
    +	val := s.evalPipeline(dot, pipe)
    +	truth, ok := isTrue(val)
    +	if !ok {
    +		s.errorf("if/with can't use %v", val)
    +	}
    +	if truth {
    +		if typ == parse.NodeWith {
    +			s.walk(val, list)
    +		} else {
    +			s.walk(dot, list)
    +		}
    +	} else if elseList != nil {
    +		s.walk(dot, elseList)
    +	}
    +}
    +
    +// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
    +// and whether the value has a meaningful truth value.
    +func isTrue(val reflect.Value) (truth, ok bool) {
    +	if !val.IsValid() {
    +		// Something like var x interface{}, never set. It's a form of nil.
    +		return false, true
    +	}
    +	switch val.Kind() {
    +	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
    +		truth = val.Len() > 0
    +	case reflect.Bool:
    +		truth = val.Bool()
    +	case reflect.Complex64, reflect.Complex128:
    +		truth = val.Complex() != 0
    +	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
    +		truth = !val.IsNil()
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +		truth = val.Int() != 0
    +	case reflect.Float32, reflect.Float64:
    +		truth = val.Float() != 0
    +	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
    +		truth = val.Uint() != 0
    +	case reflect.Struct:
    +		truth = true // Struct values are always true.
    +	default:
    +		return
    +	}
    +	return truth, true
    +}
    +
    +func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
    +	s.at(r)
    +	defer s.pop(s.mark())
    +	val, _ := indirect(s.evalPipeline(dot, r.Pipe))
    +	// mark top of stack before any variables in the body are pushed.
    +	mark := s.mark()
    +	oneIteration := func(index, elem reflect.Value) {
    +		// Set top var (lexically the second if there are two) to the element.
    +		if len(r.Pipe.Decl) > 0 {
    +			s.setVar(1, elem)
    +		}
    +		// Set next var (lexically the first if there are two) to the index.
    +		if len(r.Pipe.Decl) > 1 {
    +			s.setVar(2, index)
    +		}
    +		s.walk(elem, r.List)
    +		s.pop(mark)
    +	}
    +	switch val.Kind() {
    +	case reflect.Array, reflect.Slice:
    +		if val.Len() == 0 {
    +			break
    +		}
    +		for i := 0; i < val.Len(); i++ {
    +			oneIteration(reflect.ValueOf(i), val.Index(i))
    +		}
    +		return
    +	case reflect.Map:
    +		if val.Len() == 0 {
    +			break
    +		}
    +		for _, key := range sortKeys(val.MapKeys()) {
    +			oneIteration(key, val.MapIndex(key))
    +		}
    +		return
    +	case reflect.Chan:
    +		if val.IsNil() {
    +			break
    +		}
    +		i := 0
    +		for ; ; i++ {
    +			elem, ok := val.Recv()
    +			if !ok {
    +				break
    +			}
    +			oneIteration(reflect.ValueOf(i), elem)
    +		}
    +		if i == 0 {
    +			break
    +		}
    +		return
    +	case reflect.Invalid:
    +		break // An invalid value is likely a nil map, etc. and acts like an empty map.
    +	default:
    +		s.errorf("range can't iterate over %v", val)
    +	}
    +	if r.ElseList != nil {
    +		s.walk(dot, r.ElseList)
    +	}
    +}
    +
    +func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
    +	s.at(t)
    +	tmpl := s.tmpl.tmpl[t.Name]
    +	if tmpl == nil {
    +		s.errorf("template %q not defined", t.Name)
    +	}
    +	// Variables declared by the pipeline persist.
    +	dot = s.evalPipeline(dot, t.Pipe)
    +	newState := *s
    +	newState.tmpl = tmpl
    +	// No dynamic scoping: template invocations inherit no variables.
    +	newState.vars = []variable{{"$", dot}}
    +	newState.walk(dot, tmpl.Root)
    +}
    +
    +// Eval functions evaluate pipelines, commands, and their elements and extract
    +// values from the data structure by examining fields, calling methods, and so on.
    +// The printing of those values happens only through walk functions.
    +
    +// evalPipeline returns the value acquired by evaluating a pipeline. If the
    +// pipeline has a variable declaration, the variable will be pushed on the
    +// stack. Callers should therefore pop the stack after they are finished
    +// executing commands depending on the pipeline value.
    +func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
    +	if pipe == nil {
    +		return
    +	}
    +	s.at(pipe)
    +	for _, cmd := range pipe.Cmds {
    +		value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
    +		// If the object has type interface{}, dig down one level to the thing inside.
    +		if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
    +			value = reflect.ValueOf(value.Interface()) // lovely!
    +		}
    +	}
    +	for _, variable := range pipe.Decl {
    +		s.push(variable.Ident[0], value)
    +	}
    +	return value
    +}
    +
    +func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
    +	if len(args) > 1 || final.IsValid() {
    +		s.errorf("can't give argument to non-function %s", args[0])
    +	}
    +}
    +
    +func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
    +	firstWord := cmd.Args[0]
    +	switch n := firstWord.(type) {
    +	case *parse.FieldNode:
    +		return s.evalFieldNode(dot, n, cmd.Args, final)
    +	case *parse.ChainNode:
    +		return s.evalChainNode(dot, n, cmd.Args, final)
    +	case *parse.IdentifierNode:
    +		// Must be a function.
    +		return s.evalFunction(dot, n, cmd, cmd.Args, final)
    +	case *parse.PipeNode:
    +		// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
    +		return s.evalPipeline(dot, n)
    +	case *parse.VariableNode:
    +		return s.evalVariableNode(dot, n, cmd.Args, final)
    +	}
    +	s.at(firstWord)
    +	s.notAFunction(cmd.Args, final)
    +	switch word := firstWord.(type) {
    +	case *parse.BoolNode:
    +		return reflect.ValueOf(word.True)
    +	case *parse.DotNode:
    +		return dot
    +	case *parse.NilNode:
    +		s.errorf("nil is not a command")
    +	case *parse.NumberNode:
    +		return s.idealConstant(word)
    +	case *parse.StringNode:
    +		return reflect.ValueOf(word.Text)
    +	}
    +	s.errorf("can't evaluate command %q", firstWord)
    +	panic("not reached")
    +}
    +
    +// idealConstant is called to return the value of a number in a context where
    +// we don't know the type. In that case, the syntax of the number tells us
    +// its type, and we use Go rules to resolve.  Note there is no such thing as
    +// a uint ideal constant in this situation - the value must be of int type.
    +func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
    +	// These are ideal constants but we don't know the type
    +	// and we have no context.  (If it was a method argument,
    +	// we'd know what we need.) The syntax guides us to some extent.
    +	s.at(constant)
    +	switch {
    +	case constant.IsComplex:
    +		return reflect.ValueOf(constant.Complex128) // incontrovertible.
    +	case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
    +		return reflect.ValueOf(constant.Float64)
    +	case constant.IsInt:
    +		n := int(constant.Int64)
    +		if int64(n) != constant.Int64 {
    +			s.errorf("%s overflows int", constant.Text)
    +		}
    +		return reflect.ValueOf(n)
    +	case constant.IsUint:
    +		s.errorf("%s overflows int", constant.Text)
    +	}
    +	return zero
    +}
    +
    +func isHexConstant(s string) bool {
    +	return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
    +}
    +
    +func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
    +	s.at(field)
    +	return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
    +}
    +
    +func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
    +	s.at(chain)
    +	// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
    +	pipe := s.evalArg(dot, nil, chain.Node)
    +	if len(chain.Field) == 0 {
    +		s.errorf("internal error: no fields in evalChainNode")
    +	}
    +	return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
    +}
    +
    +func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
    +	// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
    +	s.at(variable)
    +	value := s.varValue(variable.Ident[0])
    +	if len(variable.Ident) == 1 {
    +		s.notAFunction(args, final)
    +		return value
    +	}
    +	return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
    +}
    +
    +// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
    +// dot is the environment in which to evaluate arguments, while
    +// receiver is the value being walked along the chain.
    +func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
    +	n := len(ident)
    +	for i := 0; i < n-1; i++ {
    +		receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
    +	}
    +	// Now if it's a method, it gets the arguments.
    +	return s.evalField(dot, ident[n-1], node, args, final, receiver)
    +}
    +
    +func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
    +	s.at(node)
    +	name := node.Ident
    +	function, ok := findFunction(name, s.tmpl)
    +	if !ok {
    +		s.errorf("%q is not a defined function", name)
    +	}
    +	return s.evalCall(dot, function, cmd, name, args, final)
    +}
    +
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
// The 'final' argument represents the return value from the preceding
// value of the pipeline, if any.
// Resolution order: a method on *T (or T) first, then a struct field,
// then a map entry keyed by the field name; anything else is an error.
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
	if !receiver.IsValid() {
		return zero
	}
	typ := receiver.Type() // remember the original type for error messages
	receiver, _ = indirect(receiver)
	// Unless it's an interface, need to get to a value of type *T to guarantee
	// we see all methods of T and *T.
	ptr := receiver
	if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
		ptr = ptr.Addr()
	}
	if method := ptr.MethodByName(fieldName); method.IsValid() {
		return s.evalCall(dot, method, node, fieldName, args, final)
	}
	// args[0], when present, is the field node itself, so real arguments
	// start at index 1 (see evalCall).
	hasArgs := len(args) > 1 || final.IsValid()
	// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
	receiver, isNil := indirect(receiver)
	if isNil {
		s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
	}
	switch receiver.Kind() {
	case reflect.Struct:
		tField, ok := receiver.Type().FieldByName(fieldName)
		if ok {
			field := receiver.FieldByIndex(tField.Index)
			if tField.PkgPath != "" { // field is unexported
				s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
			}
			// If it's a function, we must call it.
			if hasArgs {
				s.errorf("%s has arguments but cannot be invoked as function", fieldName)
			}
			return field
		}
		s.errorf("%s is not a field of struct type %s", fieldName, typ)
	case reflect.Map:
		// If it's a map, attempt to use the field name as a key.
		nameVal := reflect.ValueOf(fieldName)
		if nameVal.Type().AssignableTo(receiver.Type().Key()) {
			if hasArgs {
				s.errorf("%s is not a method but has arguments", fieldName)
			}
			return receiver.MapIndex(nameVal)
		}
	}
	s.errorf("can't evaluate field %s in type %s", fieldName, typ)
	panic("not reached")
}
    +
// Cached reflect.Types of the error and fmt.Stringer interfaces,
// used e.g. by printableValue and goodFunc to test implementations.
var (
	errorType       = reflect.TypeOf((*error)(nil)).Elem()
	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)
    +
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
// it looks just like a function call.  The arg list, if non-nil, includes (in the manner of the shell), arg[0]
// as the function itself.
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
	if args != nil {
		args = args[1:] // Zeroth arg is function name/node; not passed to function.
	}
	typ := fun.Type()
	numIn := len(args)
	if final.IsValid() {
		numIn++ // the pipeline's previous value counts as one more argument
	}
	numFixed := len(args)
	if typ.IsVariadic() {
		numFixed = typ.NumIn() - 1 // last arg is the variadic one.
		if numIn < numFixed {
			s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
		}
	} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
		s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
	}
	if !goodFunc(typ) {
		// TODO: This could still be a confusing error; maybe goodFunc should provide info.
		s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
	}
	// Build the arg list.
	argv := make([]reflect.Value, numIn)
	// Args must be evaluated. Fixed args first.
	i := 0
	for ; i < numFixed && i < len(args); i++ {
		argv[i] = s.evalArg(dot, typ.In(i), args[i])
	}
	// Now the ... args.
	if typ.IsVariadic() {
		argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
		for ; i < len(args); i++ {
			argv[i] = s.evalArg(dot, argType, args[i])
		}
	}
	// Add final value if necessary. It goes last in the argument list.
	if final.IsValid() {
		t := typ.In(typ.NumIn() - 1)
		if typ.IsVariadic() {
			t = t.Elem()
		}
		argv[i] = s.validateType(final, t)
	}
	result := fun.Call(argv)
	// If we have an error that is not nil, stop execution and return that error to the caller.
	if len(result) == 2 && !result[1].IsNil() {
		s.at(node)
		s.errorf("error calling %s: %s", name, result[1].Interface().(error))
	}
	return result[0]
}
    +
    +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
    +func canBeNil(typ reflect.Type) bool {
    +	switch typ.Kind() {
    +	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
    +		return true
    +	}
    +	return false
    +}
    +
// validateType guarantees that the value is valid and assignable to the type.
// It may unwrap one level of interface or pointer, or take one address, to
// make the value assignable; anything further is an error.
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
	if !value.IsValid() {
		if typ == nil || canBeNil(typ) {
			// An untyped nil interface{}. Accept as a proper nil value.
			return reflect.Zero(typ)
		}
		s.errorf("invalid value; expected %s", typ)
	}
	if typ != nil && !value.Type().AssignableTo(typ) {
		if value.Kind() == reflect.Interface && !value.IsNil() {
			// Unwrap the interface to its dynamic value and retry.
			value = value.Elem()
			if value.Type().AssignableTo(typ) {
				return value
			}
			// fallthrough
		}
		// Does one dereference or indirection work? We could do more, as we
		// do with method receivers, but that gets messy and method receivers
		// are much more constrained, so it makes more sense there than here.
		// Besides, one is almost always all you need.
		switch {
		case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
			value = value.Elem()
			if !value.IsValid() {
				s.errorf("dereference of nil pointer of type %s", typ)
			}
		case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
			value = value.Addr()
		default:
			s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
		}
	}
	return value
}
    +
// evalArg evaluates a single argument node and coerces the result to the
// expected parameter type typ, reporting an error if that is impossible.
// Structured nodes (fields, variables, pipelines, ...) are handled first;
// literals are then dispatched on the kind of the expected type.
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	switch arg := n.(type) {
	case *parse.DotNode:
		return s.validateType(dot, typ)
	case *parse.NilNode:
		if canBeNil(typ) {
			return reflect.Zero(typ)
		}
		s.errorf("cannot assign nil to %s", typ)
	case *parse.FieldNode:
		return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
	case *parse.VariableNode:
		return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
	case *parse.PipeNode:
		return s.validateType(s.evalPipeline(dot, arg), typ)
	case *parse.IdentifierNode:
		return s.evalFunction(dot, arg, arg, nil, zero)
	case *parse.ChainNode:
		return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
	}
	// Not a structured node: treat n as a literal of the expected kind.
	switch typ.Kind() {
	case reflect.Bool:
		return s.evalBool(typ, n)
	case reflect.Complex64, reflect.Complex128:
		return s.evalComplex(typ, n)
	case reflect.Float32, reflect.Float64:
		return s.evalFloat(typ, n)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return s.evalInteger(typ, n)
	case reflect.Interface:
		if typ.NumMethod() == 0 {
			return s.evalEmptyInterface(dot, n)
		}
	case reflect.String:
		return s.evalString(typ, n)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return s.evalUnsignedInteger(typ, n)
	}
	s.errorf("can't handle %s for arg of type %s", n, typ)
	panic("not reached")
}
    +
// evalBool evaluates n as a boolean literal and returns it as a value of type typ.
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	if n, ok := n.(*parse.BoolNode); ok {
		value := reflect.New(typ).Elem()
		value.SetBool(n.True)
		return value
	}
	s.errorf("expected bool; found %s", n)
	panic("not reached")
}
    +
// evalString evaluates n as a string literal and returns it as a value of type typ.
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	if n, ok := n.(*parse.StringNode); ok {
		value := reflect.New(typ).Elem()
		value.SetString(n.Text)
		return value
	}
	s.errorf("expected string; found %s", n)
	panic("not reached")
}
    +
// evalInteger evaluates n as a signed-integer literal and returns it as a value of type typ.
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
		value := reflect.New(typ).Elem()
		value.SetInt(n.Int64)
		return value
	}
	s.errorf("expected integer; found %s", n)
	panic("not reached")
}
    +
// evalUnsignedInteger evaluates n as an unsigned-integer literal and returns it as a value of type typ.
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
		value := reflect.New(typ).Elem()
		value.SetUint(n.Uint64)
		return value
	}
	s.errorf("expected unsigned integer; found %s", n)
	panic("not reached")
}
    +
// evalFloat evaluates n as a floating-point literal and returns it as a value of type typ.
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
		value := reflect.New(typ).Elem()
		value.SetFloat(n.Float64)
		return value
	}
	s.errorf("expected float; found %s", n)
	panic("not reached")
}
    +
    +func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
    +	if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
    +		value := reflect.New(typ).Elem()
    +		value.SetComplex(n.Complex128)
    +		return value
    +	}
    +	s.errorf("expected complex; found %s", n)
    +	panic("not reached")
    +}
    +
// evalEmptyInterface evaluates n for assignment to an argument of type
// interface{} (no methods), choosing the natural Go value for each node kind.
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
	s.at(n)
	switch n := n.(type) {
	case *parse.BoolNode:
		return reflect.ValueOf(n.True)
	case *parse.DotNode:
		return dot
	case *parse.FieldNode:
		return s.evalFieldNode(dot, n, nil, zero)
	case *parse.IdentifierNode:
		return s.evalFunction(dot, n, n, nil, zero)
	case *parse.NilNode:
		// NilNode is handled in evalArg, the only place that calls here.
		s.errorf("evalEmptyInterface: nil (can't happen)")
	case *parse.NumberNode:
		return s.idealConstant(n)
	case *parse.StringNode:
		return reflect.ValueOf(n.Text)
	case *parse.VariableNode:
		return s.evalVariableNode(dot, n, nil, zero)
	case *parse.PipeNode:
		return s.evalPipeline(dot, n)
	}
	s.errorf("can't handle assignment of %s to empty interface argument", n)
	panic("not reached")
}
    +
    +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
    +// We indirect through pointers and empty interfaces (only) because
    +// non-empty interfaces have methods we might need.
    +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
    +	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
    +		if v.IsNil() {
    +			return v, true
    +		}
    +		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
    +			break
    +		}
    +	}
    +	return v, false
    +}
    +
// printValue writes the textual representation of the value to the output of
// the template. It fails only when printableValue rejects the value
// (channels and functions have no printable form).
func (s *state) printValue(n parse.Node, v reflect.Value) {
	s.at(n)
	iface, ok := printableValue(v)
	if !ok {
		s.errorf("can't print %s of type %s", n, v.Type())
	}
	fmt.Fprint(s.wr, iface)
}
    +
    +// printableValue returns the, possibly indirected, interface value inside v that
    +// is best for a call to formatted printer.
    +func printableValue(v reflect.Value) (interface{}, bool) {
    +	if v.Kind() == reflect.Ptr {
    +		v, _ = indirect(v) // fmt.Fprint handles nil.
    +	}
    +	if !v.IsValid() {
    +		return "", true
    +	}
    +
    +	if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
    +		if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
    +			v = v.Addr()
    +		} else {
    +			switch v.Kind() {
    +			case reflect.Chan, reflect.Func:
    +				return nil, false
    +			}
    +		}
    +	}
    +	return v.Interface(), true
    +}
    +
// Types to help sort the keys in a map for reproducible output.

// rvs provides the Len/Swap halves of sort.Interface over a slice of
// reflect.Values; each rv* wrapper below supplies a kind-specific Less.
type rvs []reflect.Value

func (x rvs) Len() int      { return len(x) }
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

// rvInts orders values by their signed-integer content.
type rvInts struct{ rvs }

func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }

// rvUints orders values by their unsigned-integer content.
type rvUints struct{ rvs }

func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }

// rvFloats orders values by their floating-point content.
type rvFloats struct{ rvs }

func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }

// rvStrings orders values by their string content.
type rvStrings struct{ rvs }

func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
    +
    +// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
    +func sortKeys(v []reflect.Value) []reflect.Value {
    +	if len(v) <= 1 {
    +		return v
    +	}
    +	switch v[0].Kind() {
    +	case reflect.Float32, reflect.Float64:
    +		sort.Sort(rvFloats{v})
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +		sort.Sort(rvInts{v})
    +	case reflect.String:
    +		sort.Sort(rvStrings{v})
    +	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
    +		sort.Sort(rvUints{v})
    +	}
    +	return v
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/funcs.go b/src/prometheus/vendor/github.com/alecthomas/template/funcs.go
    new file mode 100644
    index 0000000..39ee5ed
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/funcs.go
    @@ -0,0 +1,598 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package template
    +
    +import (
    +	"bytes"
    +	"errors"
    +	"fmt"
    +	"io"
    +	"net/url"
    +	"reflect"
    +	"strings"
    +	"unicode"
    +	"unicode/utf8"
    +)
    +
// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
type FuncMap map[string]interface{}

// builtins holds the functions predefined for every template.
var builtins = FuncMap{
	"and":      and,
	"call":     call,
	"html":     HTMLEscaper,
	"index":    index,
	"js":       JSEscaper,
	"len":      length,
	"not":      not,
	"or":       or,
	"print":    fmt.Sprint,
	"printf":   fmt.Sprintf,
	"println":  fmt.Sprintln,
	"urlquery": URLQueryEscaper,

	// Comparisons
	"eq": eq, // ==
	"ge": ge, // >=
	"gt": gt, // >
	"le": le, // <=
	"lt": lt, // <
	"ne": ne, // !=
}

// builtinFuncs is the reflect.Value form of builtins, computed once at
// package init so lookups need no per-call conversion.
var builtinFuncs = createValueFuncs(builtins)
    +
    +// createValueFuncs turns a FuncMap into a map[string]reflect.Value
    +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
    +	m := make(map[string]reflect.Value)
    +	addValueFuncs(m, funcMap)
    +	return m
    +}
    +
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
// It panics if an entry is not a function or has a bad result signature,
// since a broken FuncMap is a programmer error.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
	for name, fn := range in {
		v := reflect.ValueOf(fn)
		if v.Kind() != reflect.Func {
			panic("value for " + name + " not a function")
		}
		if !goodFunc(v.Type()) {
			panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
		}
		out[name] = v
	}
}
    +
    +// addFuncs adds to values the functions in funcs. It does no checking of the input -
    +// call addValueFuncs first.
    +func addFuncs(out, in FuncMap) {
    +	for name, fn := range in {
    +		out[name] = fn
    +	}
    +}
    +
    +// goodFunc checks that the function or method has the right result signature.
    +func goodFunc(typ reflect.Type) bool {
    +	// We allow functions with 1 result or 2 results where the second is an error.
    +	switch {
    +	case typ.NumOut() == 1:
    +		return true
    +	case typ.NumOut() == 2 && typ.Out(1) == errorType:
    +		return true
    +	}
    +	return false
    +}
    +
// findFunction looks for a function in the template, and global map.
// Template-local functions (execFuncs) take precedence over the builtins.
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
	if tmpl != nil && tmpl.common != nil {
		if fn := tmpl.execFuncs[name]; fn.IsValid() {
			return fn, true
		}
	}
	if fn := builtinFuncs[name]; fn.IsValid() {
		return fn, true
	}
	return reflect.Value{}, false
}
    +
// Indexing.

// index returns the result of indexing its first argument by the following
// arguments.  Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
func index(item interface{}, indices ...interface{}) (interface{}, error) {
	v := reflect.ValueOf(item)
	for _, i := range indices {
		index := reflect.ValueOf(i)
		var isNil bool
		if v, isNil = indirect(v); isNil {
			return nil, fmt.Errorf("index of nil pointer")
		}
		switch v.Kind() {
		case reflect.Array, reflect.Slice, reflect.String:
			var x int64
			switch index.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				x = index.Int()
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
				x = int64(index.Uint())
			default:
				return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
			}
			if x < 0 || x >= int64(v.Len()) {
				return nil, fmt.Errorf("index out of range: %d", x)
			}
			v = v.Index(int(x))
		case reflect.Map:
			// An invalid index value means an untyped nil; use the zero key.
			if !index.IsValid() {
				index = reflect.Zero(v.Type().Key())
			}
			if !index.Type().AssignableTo(v.Type().Key()) {
				return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
			}
			// A missing key yields the zero value of the map's element type.
			if x := v.MapIndex(index); x.IsValid() {
				v = x
			} else {
				v = reflect.Zero(v.Type().Elem())
			}
		default:
			return nil, fmt.Errorf("can't index item of type %s", v.Type())
		}
	}
	return v.Interface(), nil
}
    +
    +// Length
    +
    +// length returns the length of the item, with an error if it has no defined length.
    +func length(item interface{}) (int, error) {
    +	v, isNil := indirect(reflect.ValueOf(item))
    +	if isNil {
    +		return 0, fmt.Errorf("len of nil pointer")
    +	}
    +	switch v.Kind() {
    +	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
    +		return v.Len(), nil
    +	}
    +	return 0, fmt.Errorf("len of type %s", v.Type())
    +}
    +
// Function invocation

// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
func call(fn interface{}, args ...interface{}) (interface{}, error) {
	v := reflect.ValueOf(fn)
	typ := v.Type()
	if typ.Kind() != reflect.Func {
		return nil, fmt.Errorf("non-function of type %s", typ)
	}
	if !goodFunc(typ) {
		return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
	}
	numIn := typ.NumIn()
	var dddType reflect.Type
	if typ.IsVariadic() {
		if len(args) < numIn-1 {
			return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
		}
		dddType = typ.In(numIn - 1).Elem()
	} else {
		if len(args) != numIn {
			return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
		}
	}
	argv := make([]reflect.Value, len(args))
	for i, arg := range args {
		value := reflect.ValueOf(arg)
		// Compute the expected type. Clumsy because of variadics.
		var argType reflect.Type
		if !typ.IsVariadic() || i < numIn-1 {
			argType = typ.In(i)
		} else {
			argType = dddType
		}
		// An untyped nil argument is accepted for nilable parameter types.
		if !value.IsValid() && canBeNil(argType) {
			value = reflect.Zero(argType)
		}
		if !value.Type().AssignableTo(argType) {
			return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
		}
		argv[i] = value
	}
	result := v.Call(argv)
	// Propagate a non-nil second (error) result to the caller.
	if len(result) == 2 && !result[1].IsNil() {
		return result[0].Interface(), result[1].Interface().(error)
	}
	return result[0].Interface(), nil
}
    +
// Boolean logic.

// truth reports whether a is 'true' in the template sense, delegating the
// definition of truth to isTrue.
func truth(a interface{}) bool {
	t, _ := isTrue(reflect.ValueOf(a))
	return t
}
    +
    +// and computes the Boolean AND of its arguments, returning
    +// the first false argument it encounters, or the last argument.
    +func and(arg0 interface{}, args ...interface{}) interface{} {
    +	if !truth(arg0) {
    +		return arg0
    +	}
    +	for i := range args {
    +		arg0 = args[i]
    +		if !truth(arg0) {
    +			break
    +		}
    +	}
    +	return arg0
    +}
    +
    +// or computes the Boolean OR of its arguments, returning
    +// the first true argument it encounters, or the last argument.
    +func or(arg0 interface{}, args ...interface{}) interface{} {
    +	if truth(arg0) {
    +		return arg0
    +	}
    +	for i := range args {
    +		arg0 = args[i]
    +		if truth(arg0) {
    +			break
    +		}
    +	}
    +	return arg0
    +}
    +
// not returns the Boolean negation of its argument, using the same
// definition of truth as isTrue.
func not(arg interface{}) (truth bool) {
	truth, _ = isTrue(reflect.ValueOf(arg))
	return !truth
}
    +
// Comparison.

// TODO: Perhaps allow comparison between signed and unsigned integers.

var (
	errBadComparisonType = errors.New("invalid type for comparison")
	errBadComparison     = errors.New("incompatible types for comparison")
	errNoComparison      = errors.New("missing argument for comparison")
)

// kind classifies a reflect.Kind into the coarse categories the comparison
// functions (eq, lt, ...) care about.
type kind int

const (
	invalidKind kind = iota
	boolKind
	complexKind
	intKind
	floatKind
	integerKind // NOTE(review): never produced by basicKind below — presumably historical; confirm before removing
	stringKind
	uintKind
)
    +
    +func basicKind(v reflect.Value) (kind, error) {
    +	switch v.Kind() {
    +	case reflect.Bool:
    +		return boolKind, nil
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +		return intKind, nil
    +	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
    +		return uintKind, nil
    +	case reflect.Float32, reflect.Float64:
    +		return floatKind, nil
    +	case reflect.Complex64, reflect.Complex128:
    +		return complexKind, nil
    +	case reflect.String:
    +		return stringKind, nil
    +	}
    +	return invalidKind, errBadComparisonType
    +}
    +
// eq evaluates the comparison a == b || a == c || ...
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
	v1 := reflect.ValueOf(arg1)
	k1, err := basicKind(v1)
	if err != nil {
		return false, err
	}
	if len(arg2) == 0 {
		return false, errNoComparison
	}
	for _, arg := range arg2 {
		v2 := reflect.ValueOf(arg)
		k2, err := basicKind(v2)
		if err != nil {
			return false, err
		}
		truth := false
		if k1 != k2 {
			// Special case: Can compare integer values regardless of type's sign.
			switch {
			case k1 == intKind && k2 == uintKind:
				truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
			case k1 == uintKind && k2 == intKind:
				truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
			default:
				return false, errBadComparison
			}
		} else {
			switch k1 {
			case boolKind:
				truth = v1.Bool() == v2.Bool()
			case complexKind:
				truth = v1.Complex() == v2.Complex()
			case floatKind:
				truth = v1.Float() == v2.Float()
			case intKind:
				truth = v1.Int() == v2.Int()
			case stringKind:
				truth = v1.String() == v2.String()
			case uintKind:
				truth = v1.Uint() == v2.Uint()
			default:
				panic("invalid kind")
			}
		}
		if truth {
			return true, nil
		}
	}
	// No argument matched arg1.
	return false, nil
}
    +
// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
	// != is the inverse of ==. Note that on error eq returns false, so the
	// boolean result here is true alongside the error; callers check err first.
	equal, err := eq(arg1, arg2)
	return !equal, err
}
    +
// lt evaluates the comparison a < b.
func lt(arg1, arg2 interface{}) (bool, error) {
	v1 := reflect.ValueOf(arg1)
	k1, err := basicKind(v1)
	if err != nil {
		return false, err
	}
	v2 := reflect.ValueOf(arg2)
	k2, err := basicKind(v2)
	if err != nil {
		return false, err
	}
	truth := false
	if k1 != k2 {
		// Special case: Can compare integer values regardless of type's sign.
		switch {
		case k1 == intKind && k2 == uintKind:
			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
		case k1 == uintKind && k2 == intKind:
			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
		default:
			return false, errBadComparison
		}
	} else {
		switch k1 {
		case boolKind, complexKind:
			// Booleans and complex numbers are not ordered.
			return false, errBadComparisonType
		case floatKind:
			truth = v1.Float() < v2.Float()
		case intKind:
			truth = v1.Int() < v2.Int()
		case stringKind:
			truth = v1.String() < v2.String()
		case uintKind:
			truth = v1.Uint() < v2.Uint()
		default:
			panic("invalid kind")
		}
	}
	return truth, nil
}
    +
// le evaluates the comparison a <= b.
func le(arg1, arg2 interface{}) (bool, error) {
	// <= is < or ==.
	lessThan, err := lt(arg1, arg2)
	if lessThan || err != nil {
		return lessThan, err
	}
	return eq(arg1, arg2)
}
    +
// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
	// > is the inverse of <=.
	lessOrEqual, err := le(arg1, arg2)
	if err != nil {
		return false, err
	}
	return !lessOrEqual, nil
}
    +
// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
	// >= is the inverse of <.
	lessThan, err := lt(arg1, arg2)
	if err != nil {
		return false, err
	}
	return !lessThan, nil
}
    +
    +// HTML escaping.
    +
    +var (
    +	htmlQuot = []byte(""") // shorter than """
    +	htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5
    +	htmlAmp  = []byte("&")
    +	htmlLt   = []byte("<")
    +	htmlGt   = []byte(">")
    +)
    +
    +// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
    +func HTMLEscape(w io.Writer, b []byte) {
    +	last := 0
    +	for i, c := range b {
    +		var html []byte
    +		switch c {
    +		case '"':
    +			html = htmlQuot
    +		case '\'':
    +			html = htmlApos
    +		case '&':
    +			html = htmlAmp
    +		case '<':
    +			html = htmlLt
    +		case '>':
    +			html = htmlGt
    +		default:
    +			continue
    +		}
    +		w.Write(b[last:i])
    +		w.Write(html)
    +		last = i + 1
    +	}
    +	w.Write(b[last:])
    +}
    +
    +// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
    +func HTMLEscapeString(s string) string {
    +	// Avoid allocation if we can.
    +	if strings.IndexAny(s, `'"&<>`) < 0 {
    +		return s
    +	}
    +	var b bytes.Buffer
    +	HTMLEscape(&b, []byte(s))
    +	return b.String()
    +}
    +
// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments. Arguments are formatted by evalArgs
// (fmt.Sprint semantics, with pointer indirection).
func HTMLEscaper(args ...interface{}) string {
	return HTMLEscapeString(evalArgs(args))
}
    +
// JavaScript escaping.

var (
	jsLowUni = []byte(`\u00`) // prefix for \u00XX escapes of control bytes
	hex      = []byte("0123456789ABCDEF")

	jsBackslash = []byte(`\\`)
	jsApos      = []byte(`\'`)
	jsQuot      = []byte(`\"`)
	jsLt        = []byte(`\x3C`)
	jsGt        = []byte(`\x3E`)
)
    +
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
	last := 0
	for i := 0; i < len(b); i++ {
		c := b[i]

		if !jsIsSpecial(rune(c)) {
			// fast path: nothing to do
			continue
		}
		// Flush the ordinary bytes preceding this special one.
		w.Write(b[last:i])

		if c < utf8.RuneSelf {
			// Quotes, slashes and angle brackets get quoted.
			// Control characters get written as \u00XX.
			switch c {
			case '\\':
				w.Write(jsBackslash)
			case '\'':
				w.Write(jsApos)
			case '"':
				w.Write(jsQuot)
			case '<':
				w.Write(jsLt)
			case '>':
				w.Write(jsGt)
			default:
				w.Write(jsLowUni)
				t, b := c>>4, c&0x0f
				w.Write(hex[t : t+1])
				w.Write(hex[b : b+1])
			}
		} else {
			// Unicode rune.
			r, size := utf8.DecodeRune(b[i:])
			if unicode.IsPrint(r) {
				w.Write(b[i : i+size])
			} else {
				fmt.Fprintf(w, "\\u%04X", r)
			}
			i += size - 1 // skip the remaining bytes of the multi-byte rune
		}
		last = i + 1
	}
	w.Write(b[last:])
}
    +
    +// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
    +func JSEscapeString(s string) string {
    +	// Avoid allocation if we can.
    +	if strings.IndexFunc(s, jsIsSpecial) < 0 {
    +		return s
    +	}
    +	var b bytes.Buffer
    +	JSEscape(&b, []byte(s))
    +	return b.String()
    +}
    +
    +func jsIsSpecial(r rune) bool {
    +	switch r {
    +	case '\\', '\'', '"', '<', '>':
    +		return true
    +	}
    +	return r < ' ' || utf8.RuneSelf <= r
    +}
    +
// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments. Arguments are formatted by evalArgs
// (fmt.Sprint semantics, with pointer indirection).
func JSEscaper(args ...interface{}) string {
	return JSEscapeString(evalArgs(args))
}
    +
// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...interface{}) string {
	return url.QueryEscape(evalArgs(args))
}
    +
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
//	fmt.Sprint(args...)
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []interface{}) string {
	ok := false
	var s string
	// Fast path for simple common case.
	if len(args) == 1 {
		s, ok = args[0].(string)
	}
	if !ok {
		for i, arg := range args {
			// This ok deliberately shadows the outer one; it only guards
			// the assignment below.
			a, ok := printableValue(reflect.ValueOf(arg))
			if ok {
				args[i] = a
			} // else left fmt do its thing
		}
		s = fmt.Sprint(args...)
	}
	return s
}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/helper.go b/src/prometheus/vendor/github.com/alecthomas/template/helper.go
    new file mode 100644
    index 0000000..3636fb5
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/helper.go
    @@ -0,0 +1,108 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Helper functions to make constructing templates easier.
    +
    +package template
    +
    +import (
    +	"fmt"
    +	"io/ioutil"
    +	"path/filepath"
    +)
    +
    +// Functions and methods to parse templates.
    +
    +// Must is a helper that wraps a call to a function returning (*Template, error)
    +// and panics if the error is non-nil. It is intended for use in variable
    +// initializations such as
    +//	var t = template.Must(template.New("name").Parse("text"))
    +func Must(t *Template, err error) *Template {
    +	if err != nil {
    +		panic(err)
    +	}
    +	return t
    +}
    +
    +// ParseFiles creates a new Template and parses the template definitions from
    +// the named files. The returned template's name will have the (base) name and
    +// (parsed) contents of the first file. There must be at least one file.
    +// If an error occurs, parsing stops and the returned *Template is nil.
    +func ParseFiles(filenames ...string) (*Template, error) {
    +	return parseFiles(nil, filenames...)
    +}
    +
    +// ParseFiles parses the named files and associates the resulting templates with
    +// t. If an error occurs, parsing stops and the returned template is nil;
    +// otherwise it is t. There must be at least one file.
    +func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
    +	return parseFiles(t, filenames...)
    +}
    +
    +// parseFiles is the helper for the method and function. If the argument
    +// template is nil, it is created from the first file.
    +func parseFiles(t *Template, filenames ...string) (*Template, error) {
    +	if len(filenames) == 0 {
    +		// Not really a problem, but be consistent.
    +		return nil, fmt.Errorf("template: no files named in call to ParseFiles")
    +	}
    +	for _, filename := range filenames {
    +		b, err := ioutil.ReadFile(filename)
    +		if err != nil {
    +			return nil, err
    +		}
    +		s := string(b)
    +		name := filepath.Base(filename)
    +		// First template becomes return value if not already defined,
    +		// and we use that one for subsequent New calls to associate
    +		// all the templates together. Also, if this file has the same name
    +		// as t, this file becomes the contents of t, so
    +		//  t, err := New(name).Funcs(xxx).ParseFiles(name)
    +		// works. Otherwise we create a new template associated with t.
    +		var tmpl *Template
    +		if t == nil {
    +			t = New(name)
    +		}
    +		if name == t.Name() {
    +			tmpl = t
    +		} else {
    +			tmpl = t.New(name)
    +		}
    +		_, err = tmpl.Parse(s)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +	return t, nil
    +}
    +
    +// ParseGlob creates a new Template and parses the template definitions from the
    +// files identified by the pattern, which must match at least one file. The
    +// returned template will have the (base) name and (parsed) contents of the
    +// first file matched by the pattern. ParseGlob is equivalent to calling
    +// ParseFiles with the list of files matched by the pattern.
    +func ParseGlob(pattern string) (*Template, error) {
    +	return parseGlob(nil, pattern)
    +}
    +
    +// ParseGlob parses the template definitions in the files identified by the
    +// pattern and associates the resulting templates with t. The pattern is
    +// processed by filepath.Glob and must match at least one file. ParseGlob is
    +// equivalent to calling t.ParseFiles with the list of files matched by the
    +// pattern.
    +func (t *Template) ParseGlob(pattern string) (*Template, error) {
    +	return parseGlob(t, pattern)
    +}
    +
    +// parseGlob is the implementation of the function and method ParseGlob.
    +func parseGlob(t *Template, pattern string) (*Template, error) {
    +	filenames, err := filepath.Glob(pattern)
    +	if err != nil {
    +		return nil, err
    +	}
    +	if len(filenames) == 0 {
    +		return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
    +	}
    +	return parseFiles(t, filenames...)
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/parse/lex.go b/src/prometheus/vendor/github.com/alecthomas/template/parse/lex.go
    new file mode 100644
    index 0000000..55f1c05
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/parse/lex.go
    @@ -0,0 +1,556 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package parse
    +
    +import (
    +	"fmt"
    +	"strings"
    +	"unicode"
    +	"unicode/utf8"
    +)
    +
    +// item represents a token or text string returned from the scanner.
    +type item struct {
    +	typ itemType // The type of this item.
    +	pos Pos      // The starting position, in bytes, of this item in the input string.
    +	val string   // The value of this item.
    +}
    +
    +func (i item) String() string {
    +	switch {
    +	case i.typ == itemEOF:
    +		return "EOF"
    +	case i.typ == itemError:
    +		return i.val
    +	case i.typ > itemKeyword:
    +		return fmt.Sprintf("<%s>", i.val)
    +	case len(i.val) > 10:
    +		return fmt.Sprintf("%.10q...", i.val)
    +	}
    +	return fmt.Sprintf("%q", i.val)
    +}
    +
    +// itemType identifies the type of lex items.
    +type itemType int
    +
    +const (
    +	itemError        itemType = iota // error occurred; value is text of error
    +	itemBool                         // boolean constant
    +	itemChar                         // printable ASCII character; grab bag for comma etc.
    +	itemCharConstant                 // character constant
    +	itemComplex                      // complex constant (1+2i); imaginary is just a number
    +	itemColonEquals                  // colon-equals (':=') introducing a declaration
    +	itemEOF
    +	itemField        // alphanumeric identifier starting with '.'
    +	itemIdentifier   // alphanumeric identifier not starting with '.'
    +	itemLeftDelim    // left action delimiter
    +	itemLeftParen    // '(' inside action
    +	itemNumber       // simple number, including imaginary
    +	itemPipe         // pipe symbol
    +	itemRawString    // raw quoted string (includes quotes)
    +	itemRightDelim   // right action delimiter
    +	itemElideNewline // elide newline after right delim
    +	itemRightParen   // ')' inside action
    +	itemSpace        // run of spaces separating arguments
    +	itemString       // quoted string (includes quotes)
    +	itemText         // plain text
    +	itemVariable     // variable starting with '$', such as '$' or  '$1' or '$hello'
    +	// Keywords appear after all the rest.
    +	itemKeyword  // used only to delimit the keywords
    +	itemDot      // the cursor, spelled '.'
    +	itemDefine   // define keyword
    +	itemElse     // else keyword
    +	itemEnd      // end keyword
    +	itemIf       // if keyword
    +	itemNil      // the untyped nil constant, easiest to treat as a keyword
    +	itemRange    // range keyword
    +	itemTemplate // template keyword
    +	itemWith     // with keyword
    +)
    +
    +var key = map[string]itemType{
    +	".":        itemDot,
    +	"define":   itemDefine,
    +	"else":     itemElse,
    +	"end":      itemEnd,
    +	"if":       itemIf,
    +	"range":    itemRange,
    +	"nil":      itemNil,
    +	"template": itemTemplate,
    +	"with":     itemWith,
    +}
    +
    +const eof = -1
    +
    +// stateFn represents the state of the scanner as a function that returns the next state.
    +type stateFn func(*lexer) stateFn
    +
    +// lexer holds the state of the scanner.
    +type lexer struct {
    +	name       string    // the name of the input; used only for error reports
    +	input      string    // the string being scanned
    +	leftDelim  string    // start of action
    +	rightDelim string    // end of action
    +	state      stateFn   // the next lexing function to enter
    +	pos        Pos       // current position in the input
    +	start      Pos       // start position of this item
    +	width      Pos       // width of last rune read from input
    +	lastPos    Pos       // position of most recent item returned by nextItem
    +	items      chan item // channel of scanned items
    +	parenDepth int       // nesting depth of ( ) exprs
    +}
    +
    +// next returns the next rune in the input.
    +func (l *lexer) next() rune {
    +	if int(l.pos) >= len(l.input) {
    +		l.width = 0
    +		return eof
    +	}
    +	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
    +	l.width = Pos(w)
    +	l.pos += l.width
    +	return r
    +}
    +
    +// peek returns but does not consume the next rune in the input.
    +func (l *lexer) peek() rune {
    +	r := l.next()
    +	l.backup()
    +	return r
    +}
    +
    +// backup steps back one rune. Can only be called once per call of next.
    +func (l *lexer) backup() {
    +	l.pos -= l.width
    +}
    +
    +// emit passes an item back to the client.
    +func (l *lexer) emit(t itemType) {
    +	l.items <- item{t, l.start, l.input[l.start:l.pos]}
    +	l.start = l.pos
    +}
    +
    +// ignore skips over the pending input before this point.
    +func (l *lexer) ignore() {
    +	l.start = l.pos
    +}
    +
    +// accept consumes the next rune if it's from the valid set.
    +func (l *lexer) accept(valid string) bool {
    +	if strings.IndexRune(valid, l.next()) >= 0 {
    +		return true
    +	}
    +	l.backup()
    +	return false
    +}
    +
    +// acceptRun consumes a run of runes from the valid set.
    +func (l *lexer) acceptRun(valid string) {
    +	for strings.IndexRune(valid, l.next()) >= 0 {
    +	}
    +	l.backup()
    +}
    +
    +// lineNumber reports which line we're on, based on the position of
    +// the previous item returned by nextItem. Doing it this way
    +// means we don't have to worry about peek double counting.
    +func (l *lexer) lineNumber() int {
    +	return 1 + strings.Count(l.input[:l.lastPos], "\n")
    +}
    +
    +// errorf returns an error token and terminates the scan by passing
    +// back a nil pointer that will be the next state, terminating l.nextItem.
    +func (l *lexer) errorf(format string, args ...interface{}) stateFn {
    +	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
    +	return nil
    +}
    +
    +// nextItem returns the next item from the input.
    +func (l *lexer) nextItem() item {
    +	item := <-l.items
    +	l.lastPos = item.pos
    +	return item
    +}
    +
    +// lex creates a new scanner for the input string.
    +func lex(name, input, left, right string) *lexer {
    +	if left == "" {
    +		left = leftDelim
    +	}
    +	if right == "" {
    +		right = rightDelim
    +	}
    +	l := &lexer{
    +		name:       name,
    +		input:      input,
    +		leftDelim:  left,
    +		rightDelim: right,
    +		items:      make(chan item),
    +	}
    +	go l.run()
    +	return l
    +}
    +
    +// run runs the state machine for the lexer.
    +func (l *lexer) run() {
    +	for l.state = lexText; l.state != nil; {
    +		l.state = l.state(l)
    +	}
    +}
    +
    +// state functions
    +
    +const (
    +	leftDelim    = "{{"
    +	rightDelim   = "}}"
    +	leftComment  = "/*"
    +	rightComment = "*/"
    +)
    +
    +// lexText scans until an opening action delimiter, "{{".
    +func lexText(l *lexer) stateFn {
    +	for {
    +		if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
    +			if l.pos > l.start {
    +				l.emit(itemText)
    +			}
    +			return lexLeftDelim
    +		}
    +		if l.next() == eof {
    +			break
    +		}
    +	}
    +	// Correctly reached EOF.
    +	if l.pos > l.start {
    +		l.emit(itemText)
    +	}
    +	l.emit(itemEOF)
    +	return nil
    +}
    +
    +// lexLeftDelim scans the left delimiter, which is known to be present.
    +func lexLeftDelim(l *lexer) stateFn {
    +	l.pos += Pos(len(l.leftDelim))
    +	if strings.HasPrefix(l.input[l.pos:], leftComment) {
    +		return lexComment
    +	}
    +	l.emit(itemLeftDelim)
    +	l.parenDepth = 0
    +	return lexInsideAction
    +}
    +
    +// lexComment scans a comment. The left comment marker is known to be present.
    +func lexComment(l *lexer) stateFn {
    +	l.pos += Pos(len(leftComment))
    +	i := strings.Index(l.input[l.pos:], rightComment)
    +	if i < 0 {
    +		return l.errorf("unclosed comment")
    +	}
    +	l.pos += Pos(i + len(rightComment))
    +	if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
    +		return l.errorf("comment ends before closing delimiter")
    +
    +	}
    +	l.pos += Pos(len(l.rightDelim))
    +	l.ignore()
    +	return lexText
    +}
    +
    +// lexRightDelim scans the right delimiter, which is known to be present.
    +func lexRightDelim(l *lexer) stateFn {
    +	l.pos += Pos(len(l.rightDelim))
    +	l.emit(itemRightDelim)
    +	if l.peek() == '\\' {
    +		l.pos++
    +		l.emit(itemElideNewline)
    +	}
    +	return lexText
    +}
    +
    +// lexInsideAction scans the elements inside action delimiters.
    +func lexInsideAction(l *lexer) stateFn {
    +	// Either number, quoted string, or identifier.
    +	// Spaces separate arguments; runs of spaces turn into itemSpace.
    +	// Pipe symbols separate and are emitted.
    +	if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
    +		if l.parenDepth == 0 {
    +			return lexRightDelim
    +		}
    +		return l.errorf("unclosed left paren")
    +	}
    +	switch r := l.next(); {
    +	case r == eof || isEndOfLine(r):
    +		return l.errorf("unclosed action")
    +	case isSpace(r):
    +		return lexSpace
    +	case r == ':':
    +		if l.next() != '=' {
    +			return l.errorf("expected :=")
    +		}
    +		l.emit(itemColonEquals)
    +	case r == '|':
    +		l.emit(itemPipe)
    +	case r == '"':
    +		return lexQuote
    +	case r == '`':
    +		return lexRawQuote
    +	case r == '$':
    +		return lexVariable
    +	case r == '\'':
    +		return lexChar
    +	case r == '.':
    +		// special look-ahead for ".field" so we don't break l.backup().
    +		if l.pos < Pos(len(l.input)) {
    +			r := l.input[l.pos]
    +			if r < '0' || '9' < r {
    +				return lexField
    +			}
    +		}
    +		fallthrough // '.' can start a number.
    +	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
    +		l.backup()
    +		return lexNumber
    +	case isAlphaNumeric(r):
    +		l.backup()
    +		return lexIdentifier
    +	case r == '(':
    +		l.emit(itemLeftParen)
    +		l.parenDepth++
    +		return lexInsideAction
    +	case r == ')':
    +		l.emit(itemRightParen)
    +		l.parenDepth--
    +		if l.parenDepth < 0 {
    +			return l.errorf("unexpected right paren %#U", r)
    +		}
    +		return lexInsideAction
    +	case r <= unicode.MaxASCII && unicode.IsPrint(r):
    +		l.emit(itemChar)
    +		return lexInsideAction
    +	default:
    +		return l.errorf("unrecognized character in action: %#U", r)
    +	}
    +	return lexInsideAction
    +}
    +
    +// lexSpace scans a run of space characters.
    +// One space has already been seen.
    +func lexSpace(l *lexer) stateFn {
    +	for isSpace(l.peek()) {
    +		l.next()
    +	}
    +	l.emit(itemSpace)
    +	return lexInsideAction
    +}
    +
    +// lexIdentifier scans an alphanumeric.
    +func lexIdentifier(l *lexer) stateFn {
    +Loop:
    +	for {
    +		switch r := l.next(); {
    +		case isAlphaNumeric(r):
    +			// absorb.
    +		default:
    +			l.backup()
    +			word := l.input[l.start:l.pos]
    +			if !l.atTerminator() {
    +				return l.errorf("bad character %#U", r)
    +			}
    +			switch {
    +			case key[word] > itemKeyword:
    +				l.emit(key[word])
    +			case word[0] == '.':
    +				l.emit(itemField)
    +			case word == "true", word == "false":
    +				l.emit(itemBool)
    +			default:
    +				l.emit(itemIdentifier)
    +			}
    +			break Loop
    +		}
    +	}
    +	return lexInsideAction
    +}
    +
    +// lexField scans a field: .Alphanumeric.
    +// The . has been scanned.
    +func lexField(l *lexer) stateFn {
    +	return lexFieldOrVariable(l, itemField)
    +}
    +
    +// lexVariable scans a Variable: $Alphanumeric.
    +// The $ has been scanned.
    +func lexVariable(l *lexer) stateFn {
    +	if l.atTerminator() { // Nothing interesting follows -> "$".
    +		l.emit(itemVariable)
    +		return lexInsideAction
    +	}
    +	return lexFieldOrVariable(l, itemVariable)
    +}
    +
    +// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
    +// The . or $ has been scanned.
    +func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
    +	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
    +		if typ == itemVariable {
    +			l.emit(itemVariable)
    +		} else {
    +			l.emit(itemDot)
    +		}
    +		return lexInsideAction
    +	}
    +	var r rune
    +	for {
    +		r = l.next()
    +		if !isAlphaNumeric(r) {
    +			l.backup()
    +			break
    +		}
    +	}
    +	if !l.atTerminator() {
    +		return l.errorf("bad character %#U", r)
    +	}
    +	l.emit(typ)
    +	return lexInsideAction
    +}
    +
    +// atTerminator reports whether the input is at valid termination character to
    +// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
    +// like "$x+2" not being acceptable without a space, in case we decide one
    +// day to implement arithmetic.
    +func (l *lexer) atTerminator() bool {
    +	r := l.peek()
    +	if isSpace(r) || isEndOfLine(r) {
    +		return true
    +	}
    +	switch r {
    +	case eof, '.', ',', '|', ':', ')', '(':
    +		return true
    +	}
    +	// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
    +	// succeed but should fail) but only in extremely rare cases caused by willfully
    +	// bad choice of delimiter.
    +	if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
    +		return true
    +	}
    +	return false
    +}
    +
    +// lexChar scans a character constant. The initial quote is already
    +// scanned. Syntax checking is done by the parser.
    +func lexChar(l *lexer) stateFn {
    +Loop:
    +	for {
    +		switch l.next() {
    +		case '\\':
    +			if r := l.next(); r != eof && r != '\n' {
    +				break
    +			}
    +			fallthrough
    +		case eof, '\n':
    +			return l.errorf("unterminated character constant")
    +		case '\'':
    +			break Loop
    +		}
    +	}
    +	l.emit(itemCharConstant)
    +	return lexInsideAction
    +}
    +
    +// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
    +// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
    +// and "089" - but when it's wrong the input is invalid and the parser (via
    +// strconv) will notice.
    +func lexNumber(l *lexer) stateFn {
    +	if !l.scanNumber() {
    +		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
    +	}
    +	if sign := l.peek(); sign == '+' || sign == '-' {
    +		// Complex: 1+2i. No spaces, must end in 'i'.
    +		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
    +			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
    +		}
    +		l.emit(itemComplex)
    +	} else {
    +		l.emit(itemNumber)
    +	}
    +	return lexInsideAction
    +}
    +
    +func (l *lexer) scanNumber() bool {
    +	// Optional leading sign.
    +	l.accept("+-")
    +	// Is it hex?
    +	digits := "0123456789"
    +	if l.accept("0") && l.accept("xX") {
    +		digits = "0123456789abcdefABCDEF"
    +	}
    +	l.acceptRun(digits)
    +	if l.accept(".") {
    +		l.acceptRun(digits)
    +	}
    +	if l.accept("eE") {
    +		l.accept("+-")
    +		l.acceptRun("0123456789")
    +	}
    +	// Is it imaginary?
    +	l.accept("i")
    +	// Next thing mustn't be alphanumeric.
    +	if isAlphaNumeric(l.peek()) {
    +		l.next()
    +		return false
    +	}
    +	return true
    +}
    +
    +// lexQuote scans a quoted string.
    +func lexQuote(l *lexer) stateFn {
    +Loop:
    +	for {
    +		switch l.next() {
    +		case '\\':
    +			if r := l.next(); r != eof && r != '\n' {
    +				break
    +			}
    +			fallthrough
    +		case eof, '\n':
    +			return l.errorf("unterminated quoted string")
    +		case '"':
    +			break Loop
    +		}
    +	}
    +	l.emit(itemString)
    +	return lexInsideAction
    +}
    +
    +// lexRawQuote scans a raw quoted string.
    +func lexRawQuote(l *lexer) stateFn {
    +Loop:
    +	for {
    +		switch l.next() {
    +		case eof, '\n':
    +			return l.errorf("unterminated raw quoted string")
    +		case '`':
    +			break Loop
    +		}
    +	}
    +	l.emit(itemRawString)
    +	return lexInsideAction
    +}
    +
    +// isSpace reports whether r is a space character.
    +func isSpace(r rune) bool {
    +	return r == ' ' || r == '\t'
    +}
    +
    +// isEndOfLine reports whether r is an end-of-line character.
    +func isEndOfLine(r rune) bool {
    +	return r == '\r' || r == '\n'
    +}
    +
    +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
    +func isAlphaNumeric(r rune) bool {
    +	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/parse/node.go b/src/prometheus/vendor/github.com/alecthomas/template/parse/node.go
    new file mode 100644
    index 0000000..55c37f6
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/parse/node.go
    @@ -0,0 +1,834 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Parse nodes.
    +
    +package parse
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"strconv"
    +	"strings"
    +)
    +
    +var textFormat = "%s" // Changed to "%q" in tests for better error messages.
    +
    +// A Node is an element in the parse tree. The interface is trivial.
    +// The interface contains an unexported method so that only
    +// types local to this package can satisfy it.
    +type Node interface {
    +	Type() NodeType
    +	String() string
    +	// Copy does a deep copy of the Node and all its components.
    +	// To avoid type assertions, some XxxNodes also have specialized
    +	// CopyXxx methods that return *XxxNode.
    +	Copy() Node
    +	Position() Pos // byte position of start of node in full original input string
    +	// tree returns the containing *Tree.
    +	// It is unexported so all implementations of Node are in this package.
    +	tree() *Tree
    +}
    +
    +// NodeType identifies the type of a parse tree node.
    +type NodeType int
    +
    +// Pos represents a byte position in the original input text from which
    +// this template was parsed.
    +type Pos int
    +
    +func (p Pos) Position() Pos {
    +	return p
    +}
    +
    +// Type returns itself and provides an easy default implementation
    +// for embedding in a Node. Embedded in all non-trivial Nodes.
    +func (t NodeType) Type() NodeType {
    +	return t
    +}
    +
    +const (
    +	NodeText       NodeType = iota // Plain text.
    +	NodeAction                     // A non-control action such as a field evaluation.
    +	NodeBool                       // A boolean constant.
    +	NodeChain                      // A sequence of field accesses.
    +	NodeCommand                    // An element of a pipeline.
    +	NodeDot                        // The cursor, dot.
    +	nodeElse                       // An else action. Not added to tree.
    +	nodeEnd                        // An end action. Not added to tree.
    +	NodeField                      // A field or method name.
    +	NodeIdentifier                 // An identifier; always a function name.
    +	NodeIf                         // An if action.
    +	NodeList                       // A list of Nodes.
    +	NodeNil                        // An untyped nil constant.
    +	NodeNumber                     // A numerical constant.
    +	NodePipe                       // A pipeline of commands.
    +	NodeRange                      // A range action.
    +	NodeString                     // A string constant.
    +	NodeTemplate                   // A template invocation action.
    +	NodeVariable                   // A $ variable.
    +	NodeWith                       // A with action.
    +)
    +
    +// Nodes.
    +
    +// ListNode holds a sequence of nodes.
    +type ListNode struct {
    +	NodeType
    +	Pos
    +	tr    *Tree
    +	Nodes []Node // The element nodes in lexical order.
    +}
    +
    +func (t *Tree) newList(pos Pos) *ListNode {
    +	return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
    +}
    +
    +func (l *ListNode) append(n Node) {
    +	l.Nodes = append(l.Nodes, n)
    +}
    +
    +func (l *ListNode) tree() *Tree {
    +	return l.tr
    +}
    +
    +func (l *ListNode) String() string {
    +	b := new(bytes.Buffer)
    +	for _, n := range l.Nodes {
    +		fmt.Fprint(b, n)
    +	}
    +	return b.String()
    +}
    +
    +func (l *ListNode) CopyList() *ListNode {
    +	if l == nil {
    +		return l
    +	}
    +	n := l.tr.newList(l.Pos)
    +	for _, elem := range l.Nodes {
    +		n.append(elem.Copy())
    +	}
    +	return n
    +}
    +
    +func (l *ListNode) Copy() Node {
    +	return l.CopyList()
    +}
    +
    +// TextNode holds plain text.
    +type TextNode struct {
    +	NodeType
    +	Pos
    +	tr   *Tree
    +	Text []byte // The text; may span newlines.
    +}
    +
    +func (t *Tree) newText(pos Pos, text string) *TextNode {
    +	return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
    +}
    +
    +func (t *TextNode) String() string {
    +	return fmt.Sprintf(textFormat, t.Text)
    +}
    +
    +func (t *TextNode) tree() *Tree {
    +	return t.tr
    +}
    +
    +func (t *TextNode) Copy() Node {
    +	return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
    +}
    +
    +// PipeNode holds a pipeline with optional declaration
    +type PipeNode struct {
    +	NodeType
    +	Pos
    +	tr   *Tree
    +	Line int             // The line number in the input (deprecated; kept for compatibility)
    +	Decl []*VariableNode // Variable declarations in lexical order.
    +	Cmds []*CommandNode  // The commands in lexical order.
    +}
    +
    +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
    +	return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
    +}
    +
    +func (p *PipeNode) append(command *CommandNode) {
    +	p.Cmds = append(p.Cmds, command)
    +}
    +
    +func (p *PipeNode) String() string {
    +	s := ""
    +	if len(p.Decl) > 0 {
    +		for i, v := range p.Decl {
    +			if i > 0 {
    +				s += ", "
    +			}
    +			s += v.String()
    +		}
    +		s += " := "
    +	}
    +	for i, c := range p.Cmds {
    +		if i > 0 {
    +			s += " | "
    +		}
    +		s += c.String()
    +	}
    +	return s
    +}
    +
    +func (p *PipeNode) tree() *Tree {
    +	return p.tr
    +}
    +
    +func (p *PipeNode) CopyPipe() *PipeNode {
    +	if p == nil {
    +		return p
    +	}
    +	var decl []*VariableNode
    +	for _, d := range p.Decl {
    +		decl = append(decl, d.Copy().(*VariableNode))
    +	}
    +	n := p.tr.newPipeline(p.Pos, p.Line, decl)
    +	for _, c := range p.Cmds {
    +		n.append(c.Copy().(*CommandNode))
    +	}
    +	return n
    +}
    +
    +func (p *PipeNode) Copy() Node {
    +	return p.CopyPipe()
    +}
    +
    +// ActionNode holds an action (something bounded by delimiters).
    +// Control actions have their own nodes; ActionNode represents simple
    +// ones such as field evaluations and parenthesized pipelines.
    +type ActionNode struct {
    +	NodeType
    +	Pos
    +	tr   *Tree
    +	Line int       // The line number in the input (deprecated; kept for compatibility)
    +	Pipe *PipeNode // The pipeline in the action.
    +}
    +
    +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
    +	return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
    +}
    +
    +func (a *ActionNode) String() string {
    +	return fmt.Sprintf("{{%s}}", a.Pipe)
    +
    +}
    +
    +func (a *ActionNode) tree() *Tree {
    +	return a.tr
    +}
    +
    +func (a *ActionNode) Copy() Node {
    +	return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
    +
    +}
    +
    +// CommandNode holds a command (a pipeline inside an evaluating action).
    +type CommandNode struct {
    +	NodeType
    +	Pos
    +	tr   *Tree
    +	Args []Node // Arguments in lexical order: Identifier, field, or constant.
    +}
    +
    +func (t *Tree) newCommand(pos Pos) *CommandNode {
    +	return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
    +}
    +
    +func (c *CommandNode) append(arg Node) {
    +	c.Args = append(c.Args, arg)
    +}
    +
    +func (c *CommandNode) String() string {
    +	s := ""
    +	for i, arg := range c.Args {
    +		if i > 0 {
    +			s += " "
    +		}
    +		if arg, ok := arg.(*PipeNode); ok {
    +			s += "(" + arg.String() + ")"
    +			continue
    +		}
    +		s += arg.String()
    +	}
    +	return s
    +}
    +
    +func (c *CommandNode) tree() *Tree {
    +	return c.tr
    +}
    +
    +func (c *CommandNode) Copy() Node {
    +	if c == nil {
    +		return c
    +	}
    +	n := c.tr.newCommand(c.Pos)
    +	for _, c := range c.Args {
    +		n.append(c.Copy())
    +	}
    +	return n
    +}
    +
    +// IdentifierNode holds an identifier.
    +type IdentifierNode struct {
    +	NodeType
    +	Pos
    +	tr    *Tree
    +	Ident string // The identifier's name.
    +}
    +
    +// NewIdentifier returns a new IdentifierNode with the given identifier name.
    +func NewIdentifier(ident string) *IdentifierNode {
    +	return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
    +}
    +
    +// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
    +// Chained for convenience.
    +// TODO: fix one day?
    +func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
    +	i.Pos = pos
    +	return i
    +}
    +
    +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
    +// Chained for convenience.
    +// TODO: fix one day?
    +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
    +	i.tr = t
    +	return i
    +}
    +
    +func (i *IdentifierNode) String() string {
    +	return i.Ident
    +}
    +
    +func (i *IdentifierNode) tree() *Tree {
    +	return i.tr
    +}
    +
    +func (i *IdentifierNode) Copy() Node {
    +	return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
    +}
    +
    +// VariableNode holds a list of variable names, possibly with chained field
    +// accesses. The dollar sign is part of the (first) name.
    +type VariableNode struct {
    +	NodeType
    +	Pos
    +	tr    *Tree
    +	Ident []string // Variable name and fields in lexical order.
    +}
    +
    +// newVariable splits ident on '.', so "$x.y" yields Ident ["$x", "y"].
    +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
    +	return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
    +}
    +
    +// String rejoins the name and fields with dots, reversing newVariable's split.
    +func (v *VariableNode) String() string {
    +	s := ""
    +	for i, id := range v.Ident {
    +		if i > 0 {
    +			s += "."
    +		}
    +		s += id
    +	}
    +	return s
    +}
    +
    +func (v *VariableNode) tree() *Tree {
    +	return v.tr
    +}
    +
    +// Copy clones the Ident slice so the copy is independent of later mutation.
    +func (v *VariableNode) Copy() Node {
    +	return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
    +}
    +
    +// DotNode holds the special identifier '.'.
    +type DotNode struct {
    +	NodeType
    +	Pos
    +	tr *Tree
    +}
    +
    +func (t *Tree) newDot(pos Pos) *DotNode {
    +	return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
    +}
    +
    +func (d *DotNode) Type() NodeType {
    +	// Override method on embedded NodeType for API compatibility.
    +	// TODO: Not really a problem; could change API without effect but
    +	// api tool complains.
    +	return NodeDot
    +}
    +
    +func (d *DotNode) String() string {
    +	return "."
    +}
    +
    +func (d *DotNode) tree() *Tree {
    +	return d.tr
    +}
    +
    +func (d *DotNode) Copy() Node {
    +	return d.tr.newDot(d.Pos)
    +}
    +
    +// NilNode holds the special identifier 'nil' representing an untyped nil constant.
    +type NilNode struct {
    +	NodeType
    +	Pos
    +	tr *Tree
    +}
    +
    +func (t *Tree) newNil(pos Pos) *NilNode {
    +	return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
    +}
    +
    +func (n *NilNode) Type() NodeType {
    +	// Override method on embedded NodeType for API compatibility.
    +	// TODO: Not really a problem; could change API without effect but
    +	// api tool complains.
    +	return NodeNil
    +}
    +
    +func (n *NilNode) String() string {
    +	return "nil"
    +}
    +
    +func (n *NilNode) tree() *Tree {
    +	return n.tr
    +}
    +
    +func (n *NilNode) Copy() Node {
    +	return n.tr.newNil(n.Pos)
    +}
    +
    +// FieldNode holds a field (identifier starting with '.').
    +// The names may be chained ('.x.y').
    +// The period is dropped from each ident.
    +type FieldNode struct {
    +	NodeType
    +	Pos
    +	tr    *Tree
    +	Ident []string // The identifiers in lexical order.
    +}
    +
    +func (t *Tree) newField(pos Pos, ident string) *FieldNode {
    +	return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
    +}
    +
    +// String restores the leading period on each stored ident, e.g. ".x.y".
    +func (f *FieldNode) String() string {
    +	s := ""
    +	for _, id := range f.Ident {
    +		s += "." + id
    +	}
    +	return s
    +}
    +
    +func (f *FieldNode) tree() *Tree {
    +	return f.tr
    +}
    +
    +// Copy clones the Ident slice so the copy is independent of later mutation.
    +func (f *FieldNode) Copy() Node {
    +	return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
    +}
    +
    +// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
    +// The names may be chained ('.x.y').
    +// The periods are dropped from each ident.
    +type ChainNode struct {
    +	NodeType
    +	Pos
    +	tr    *Tree
    +	Node  Node
    +	Field []string // The identifiers in lexical order.
    +}
    +
    +func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
    +	return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
    +}
    +
    +// Add adds the named field (which should start with a period) to the end of the chain.
    +// It panics if field does not begin with '.' or consists of the dot alone.
    +func (c *ChainNode) Add(field string) {
    +	if len(field) == 0 || field[0] != '.' {
    +		panic("no dot in field")
    +	}
    +	field = field[1:] // Remove leading dot.
    +	if field == "" {
    +		panic("empty field")
    +	}
    +	c.Field = append(c.Field, field)
    +}
    +
    +// String renders the chain; a pipeline term is parenthesized so the
    +// trailing field accesses read unambiguously.
    +func (c *ChainNode) String() string {
    +	s := c.Node.String()
    +	if _, ok := c.Node.(*PipeNode); ok {
    +		s = "(" + s + ")"
    +	}
    +	for _, field := range c.Field {
    +		s += "." + field
    +	}
    +	return s
    +}
    +
    +func (c *ChainNode) tree() *Tree {
    +	return c.tr
    +}
    +
    +// Copy shares the underlying Node with the receiver but clones the Field slice.
    +func (c *ChainNode) Copy() Node {
    +	return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
    +}
    +
    +// BoolNode holds a boolean constant.
    +type BoolNode struct {
    +	NodeType
    +	Pos
    +	tr   *Tree
    +	True bool // The value of the boolean constant.
    +}
    +
    +// newBool constructs a BoolNode. Note: the parameter is named "true",
    +// legally shadowing the predeclared identifier inside this function; it
    +// carries the parsed boolean value.
    +func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
    +	return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
    +}
    +
    +func (b *BoolNode) String() string {
    +	if b.True {
    +		return "true"
    +	}
    +	return "false"
    +}
    +
    +func (b *BoolNode) tree() *Tree {
    +	return b.tr
    +}
    +
    +func (b *BoolNode) Copy() Node {
    +	return b.tr.newBool(b.Pos, b.True)
    +}
    +
    +// NumberNode holds a number: signed or unsigned integer, float, or complex.
    +// The value is parsed and stored under all the types that can represent the value.
    +// This simulates in a small amount of code the behavior of Go's ideal constants.
    +type NumberNode struct {
    +	NodeType
    +	Pos
    +	tr         *Tree
    +	IsInt      bool       // Number has an integral value.
    +	IsUint     bool       // Number has an unsigned integral value.
    +	IsFloat    bool       // Number has a floating-point value.
    +	IsComplex  bool       // Number is complex.
    +	Int64      int64      // The signed integer value.
    +	Uint64     uint64     // The unsigned integer value.
    +	Float64    float64    // The floating-point value.
    +	Complex128 complex128 // The complex value.
    +	Text       string     // The original textual representation from the input.
    +}
    +
    +// newNumber parses text into a NumberNode, setting every Is* flag whose
    +// corresponding type can represent the value. It returns an error when the
    +// text is not a legal numeric constant of any supported kind.
    +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
    +	n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
    +	switch typ {
    +	case itemCharConstant:
    +		// text[0] is the quote character; UnquoteChar decodes the code point
    +		// between the quotes. "rune" shadows the predeclared identifier here.
    +		rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
    +		if err != nil {
    +			return nil, err
    +		}
    +		if tail != "'" {
    +			return nil, fmt.Errorf("malformed character constant: %s", text)
    +		}
    +		n.Int64 = int64(rune)
    +		n.IsInt = true
    +		n.Uint64 = uint64(rune)
    +		n.IsUint = true
    +		n.Float64 = float64(rune) // odd but those are the rules.
    +		n.IsFloat = true
    +		return n, nil
    +	case itemComplex:
    +		// fmt.Sscan can parse the pair, so let it do the work.
    +		if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
    +			return nil, err
    +		}
    +		n.IsComplex = true
    +		n.simplifyComplex()
    +		return n, nil
    +	}
    +	// Imaginary constants can only be complex unless they are zero.
    +	if len(text) > 0 && text[len(text)-1] == 'i' {
    +		f, err := strconv.ParseFloat(text[:len(text)-1], 64)
    +		if err == nil {
    +			n.IsComplex = true
    +			n.Complex128 = complex(0, f)
    +			n.simplifyComplex()
    +			return n, nil
    +		}
    +	}
    +	// Do integer test first so we get 0x123 etc.
    +	u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
    +	if err == nil {
    +		n.IsUint = true
    +		n.Uint64 = u
    +	}
    +	i, err := strconv.ParseInt(text, 0, 64)
    +	if err == nil {
    +		n.IsInt = true
    +		n.Int64 = i
    +		if i == 0 {
    +			n.IsUint = true // in case of -0.
    +			n.Uint64 = u
    +		}
    +	}
    +	// If an integer extraction succeeded, promote the float.
    +	if n.IsInt {
    +		n.IsFloat = true
    +		n.Float64 = float64(n.Int64)
    +	} else if n.IsUint {
    +		n.IsFloat = true
    +		n.Float64 = float64(n.Uint64)
    +	} else {
    +		f, err := strconv.ParseFloat(text, 64)
    +		if err == nil {
    +			n.IsFloat = true
    +			n.Float64 = f
    +			// If a floating-point extraction succeeded, extract the int if needed.
    +			if !n.IsInt && float64(int64(f)) == f {
    +				n.IsInt = true
    +				n.Int64 = int64(f)
    +			}
    +			if !n.IsUint && float64(uint64(f)) == f {
    +				n.IsUint = true
    +				n.Uint64 = uint64(f)
    +			}
    +		}
    +	}
    +	if !n.IsInt && !n.IsUint && !n.IsFloat {
    +		return nil, fmt.Errorf("illegal number syntax: %q", text)
    +	}
    +	return n, nil
    +}
    +
    +// simplifyComplex pulls out any other types that are represented by the complex number.
    +// These all require that the imaginary part be zero.
    +func (n *NumberNode) simplifyComplex() {
    +	n.IsFloat = imag(n.Complex128) == 0
    +	if n.IsFloat {
    +		n.Float64 = real(n.Complex128)
    +		n.IsInt = float64(int64(n.Float64)) == n.Float64
    +		if n.IsInt {
    +			n.Int64 = int64(n.Float64)
    +		}
    +		n.IsUint = float64(uint64(n.Float64)) == n.Float64
    +		if n.IsUint {
    +			n.Uint64 = uint64(n.Float64)
    +		}
    +	}
    +}
    +
    +// String returns the original source text of the number.
    +func (n *NumberNode) String() string {
    +	return n.Text
    +}
    +
    +func (n *NumberNode) tree() *Tree {
    +	return n.tr
    +}
    +
    +// Copy is a plain struct copy; all fields are values, so this is deep enough.
    +func (n *NumberNode) Copy() Node {
    +	nn := new(NumberNode)
    +	*nn = *n // Easy, fast, correct.
    +	return nn
    +}
    +
    +// StringNode holds a string constant. The value has been "unquoted".
    +type StringNode struct {
    +	NodeType
    +	Pos
    +	tr     *Tree
    +	Quoted string // The original text of the string, with quotes.
    +	Text   string // The string, after quote processing.
    +}
    +
    +func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
    +	return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
    +}
    +
    +// String returns the original quoted form, not the unquoted value.
    +func (s *StringNode) String() string {
    +	return s.Quoted
    +}
    +
    +func (s *StringNode) tree() *Tree {
    +	return s.tr
    +}
    +
    +func (s *StringNode) Copy() Node {
    +	return s.tr.newString(s.Pos, s.Quoted, s.Text)
    +}
    +
    +// endNode represents an {{end}} action.
    +// It does not appear in the final parse tree.
    +type endNode struct {
    +	NodeType
    +	Pos
    +	tr *Tree
    +}
    +
    +func (t *Tree) newEnd(pos Pos) *endNode {
    +	return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
    +}
    +
    +func (e *endNode) String() string {
    +	return "{{end}}"
    +}
    +
    +func (e *endNode) tree() *Tree {
    +	return e.tr
    +}
    +
    +func (e *endNode) Copy() Node {
    +	return e.tr.newEnd(e.Pos)
    +}
    +
    +// elseNode represents an {{else}} action. Does not appear in the final tree.
    +type elseNode struct {
    +	NodeType
    +	Pos
    +	tr   *Tree
    +	Line int // The line number in the input (deprecated; kept for compatibility)
    +}
    +
    +func (t *Tree) newElse(pos Pos, line int) *elseNode {
    +	return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
    +}
    +
    +// Type overrides the embedded NodeType so the reported type is always nodeElse.
    +func (e *elseNode) Type() NodeType {
    +	return nodeElse
    +}
    +
    +func (e *elseNode) String() string {
    +	return "{{else}}"
    +}
    +
    +func (e *elseNode) tree() *Tree {
    +	return e.tr
    +}
    +
    +func (e *elseNode) Copy() Node {
    +	return e.tr.newElse(e.Pos, e.Line)
    +}
    +
    +// BranchNode is the common representation of if, range, and with.
    +type BranchNode struct {
    +	NodeType
    +	Pos
    +	tr       *Tree
    +	Line     int       // The line number in the input (deprecated; kept for compatibility)
    +	Pipe     *PipeNode // The pipeline to be evaluated.
    +	List     *ListNode // What to execute if the value is non-empty.
    +	ElseList *ListNode // What to execute if the value is empty (nil if absent).
    +}
    +
    +// String renders the branch as template source; it panics if the embedded
    +// NodeType is not one of NodeIf, NodeRange, or NodeWith.
    +func (b *BranchNode) String() string {
    +	name := ""
    +	switch b.NodeType {
    +	case NodeIf:
    +		name = "if"
    +	case NodeRange:
    +		name = "range"
    +	case NodeWith:
    +		name = "with"
    +	default:
    +		panic("unknown branch type")
    +	}
    +	if b.ElseList != nil {
    +		return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
    +	}
    +	return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
    +}
    +
    +func (b *BranchNode) tree() *Tree {
    +	return b.tr
    +}
    +
    +// Copy is shallow: the new branch shares Pipe, List, and ElseList pointers
    +// with the receiver. (The concrete If/Range/With wrappers deep-copy instead.)
    +func (b *BranchNode) Copy() Node {
    +	switch b.NodeType {
    +	case NodeIf:
    +		return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
    +	case NodeRange:
    +		return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
    +	case NodeWith:
    +		return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
    +	default:
    +		panic("unknown branch type")
    +	}
    +}
    +
    +// IfNode represents an {{if}} action and its commands.
    +type IfNode struct {
    +	BranchNode
    +}
    +
    +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
    +	return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
    +}
    +
    +// Copy deep-copies the pipeline and both lists.
    +// NOTE(review): relies on CopyPipe/CopyList tolerating nil receivers when
    +// ElseList is absent — confirm in their definitions.
    +func (i *IfNode) Copy() Node {
    +	return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
    +}
    +
    +// RangeNode represents a {{range}} action and its commands.
    +type RangeNode struct {
    +	BranchNode
    +}
    +
    +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
    +	return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
    +}
    +
    +// Copy deep-copies the pipeline and both lists.
    +func (r *RangeNode) Copy() Node {
    +	return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
    +}
    +
    +// WithNode represents a {{with}} action and its commands.
    +type WithNode struct {
    +	BranchNode
    +}
    +
    +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
    +	return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
    +}
    +
    +// Copy deep-copies the pipeline and both lists.
    +func (w *WithNode) Copy() Node {
    +	return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
    +}
    +
    +// TemplateNode represents a {{template}} action.
    +type TemplateNode struct {
    +	NodeType
    +	Pos
    +	tr   *Tree
    +	Line int       // The line number in the input (deprecated; kept for compatibility)
    +	Name string    // The name of the template (unquoted).
    +	Pipe *PipeNode // The command to evaluate as dot for the template.
    +}
    +
    +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
    +	return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
    +}
    +
    +// String omits the pipeline when none was supplied.
    +func (t *TemplateNode) String() string {
    +	if t.Pipe == nil {
    +		return fmt.Sprintf("{{template %q}}", t.Name)
    +	}
    +	return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
    +}
    +
    +func (t *TemplateNode) tree() *Tree {
    +	return t.tr
    +}
    +
    +// Copy deep-copies the pipeline.
    +// NOTE(review): unlike String, this does not guard against a nil Pipe —
    +// presumably CopyPipe handles a nil receiver; confirm in its definition.
    +func (t *TemplateNode) Copy() Node {
    +	return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/parse/parse.go b/src/prometheus/vendor/github.com/alecthomas/template/parse/parse.go
    new file mode 100644
    index 0000000..0d77ade
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/parse/parse.go
    @@ -0,0 +1,700 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Package parse builds parse trees for templates as defined by text/template
    +// and html/template. Clients should use those packages to construct templates
    +// rather than this one, which provides shared internal data structures not
    +// intended for general use.
    +package parse
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"runtime"
    +	"strconv"
    +	"strings"
    +)
    +
    +// Tree is the representation of a single parsed template.
    +type Tree struct {
    +	Name      string    // name of the template represented by the tree.
    +	ParseName string    // name of the top-level template during parsing, for error messages.
    +	Root      *ListNode // top-level root of the tree.
    +	text      string    // text parsed to create the template (or its parent)
    +	// Parsing only; cleared after parse.
    +	funcs     []map[string]interface{}
    +	lex       *lexer
    +	token     [3]item // three-token lookahead for parser.
    +	peekCount int
    +	vars      []string // variables defined at the moment.
    +}
    +
    +// Copy returns a copy of the Tree. Any parsing state is discarded.
    +// Only Name, ParseName, the deep-copied Root, and the source text survive;
    +// funcs, lex, token, peekCount, and vars are left at their zero values.
    +func (t *Tree) Copy() *Tree {
    +	if t == nil {
    +		return nil
    +	}
    +	return &Tree{
    +		Name:      t.Name,
    +		ParseName: t.ParseName,
    +		Root:      t.Root.CopyList(),
    +		text:      t.text,
    +	}
    +}
    +
    +// Parse returns a map from template name to parse.Tree, created by parsing the
    +// templates described in the argument string. The top-level template will be
    +// given the specified name. If an error is encountered, parsing stops and an
    +// empty map is returned with the error.
    +// This is a convenience wrapper around New + (*Tree).Parse.
    +func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
    +	treeSet = make(map[string]*Tree)
    +	t := New(name)
    +	t.text = text
    +	_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
    +	return
    +}
    +
    +// next returns the next token.
    +// token[0] always holds the most recently lexed item; peekCount is the number
    +// of buffered tokens not yet consumed (0..3).
    +func (t *Tree) next() item {
    +	if t.peekCount > 0 {
    +		t.peekCount--
    +	} else {
    +		t.token[0] = t.lex.nextItem()
    +	}
    +	return t.token[t.peekCount]
    +}
    +
    +// backup backs the input stream up one token.
    +func (t *Tree) backup() {
    +	t.peekCount++
    +}
    +
    +// backup2 backs the input stream up two tokens.
    +// The zeroth token is already there.
    +func (t *Tree) backup2(t1 item) {
    +	t.token[1] = t1
    +	t.peekCount = 2
    +}
    +
    +// backup3 backs the input stream up three tokens
    +// The zeroth token is already there.
    +func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
    +	t.token[1] = t1
    +	t.token[2] = t2
    +	t.peekCount = 3
    +}
    +
    +// peek returns but does not consume the next token.
    +func (t *Tree) peek() item {
    +	if t.peekCount > 0 {
    +		return t.token[t.peekCount-1]
    +	}
    +	t.peekCount = 1
    +	t.token[0] = t.lex.nextItem()
    +	return t.token[0]
    +}
    +
    +// nextNonSpace returns the next non-space token.
    +func (t *Tree) nextNonSpace() (token item) {
    +	for {
    +		token = t.next()
    +		if token.typ != itemSpace {
    +			break
    +		}
    +	}
    +	return token
    +}
    +
    +// peekNonSpace returns but does not consume the next non-space token.
    +// Spaces before it ARE consumed; only the returned token is pushed back.
    +func (t *Tree) peekNonSpace() (token item) {
    +	for {
    +		token = t.next()
    +		if token.typ != itemSpace {
    +			break
    +		}
    +	}
    +	t.backup()
    +	return token
    +}
    +
    +// Parsing.
    +
    +// New allocates a new parse tree with the given name.
    +// The optional funcs maps are retained for use while parsing.
    +func New(name string, funcs ...map[string]interface{}) *Tree {
    +	return &Tree{
    +		Name:  name,
    +		funcs: funcs,
    +	}
    +}
    +
    +// ErrorContext returns a textual representation of the location of the node in the input text.
    +// The receiver is only used when the node does not have a pointer to the tree inside,
    +// which can occur in old code.
    +// The location is "ParseName:line:column" (column counted in bytes from the
    +// start of the line); the context is the node's String, truncated to 20 chars.
    +func (t *Tree) ErrorContext(n Node) (location, context string) {
    +	pos := int(n.Position())
    +	tree := n.tree()
    +	if tree == nil {
    +		tree = t
    +	}
    +	text := tree.text[:pos]
    +	byteNum := strings.LastIndex(text, "\n")
    +	if byteNum == -1 {
    +		byteNum = pos // On first line.
    +	} else {
    +		byteNum++ // After the newline.
    +		byteNum = pos - byteNum
    +	}
    +	lineNum := 1 + strings.Count(text, "\n")
    +	context = n.String()
    +	if len(context) > 20 {
    +		context = fmt.Sprintf("%.20s...", context)
    +	}
    +	return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
    +}
    +
    +// errorf formats the error and terminates processing.
    +// It clears Root, prefixes the message with "template: name:line:", and
    +// panics; (*Tree).recover converts the panic into a returned error.
    +func (t *Tree) errorf(format string, args ...interface{}) {
    +	t.Root = nil
    +	format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
    +	panic(fmt.Errorf(format, args...))
    +}
    +
    +// error terminates processing.
    +func (t *Tree) error(err error) {
    +	t.errorf("%s", err)
    +}
    +
    +// expect consumes the next token and guarantees it has the required type.
    +func (t *Tree) expect(expected itemType, context string) item {
    +	token := t.nextNonSpace()
    +	if token.typ != expected {
    +		t.unexpected(token, context)
    +	}
    +	return token
    +}
    +
    +// expectOneOf consumes the next token and guarantees it has one of the required types.
    +func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
    +	token := t.nextNonSpace()
    +	if token.typ != expected1 && token.typ != expected2 {
    +		t.unexpected(token, context)
    +	}
    +	return token
    +}
    +
    +// unexpected complains about the token and terminates processing.
    +func (t *Tree) unexpected(token item, context string) {
    +	t.errorf("unexpected %s in %s", token, context)
    +}
    +
    +// recover is the handler that turns panics into returns from the top level of Parse.
    +// Runtime errors (real bugs) are re-panicked; anything else is assumed to be
    +// the error value thrown by errorf and is stored through errp. Note the type
    +// assertion e.(error) would itself panic on a non-error value.
    +func (t *Tree) recover(errp *error) {
    +	e := recover()
    +	if e != nil {
    +		if _, ok := e.(runtime.Error); ok {
    +			panic(e)
    +		}
    +		if t != nil {
    +			t.stopParse()
    +		}
    +		*errp = e.(error)
    +	}
    +	return
    +}
    +
    +// startParse initializes the parser, using the lexer.
    +// The variable stack starts with "$", the top-level dollar variable.
    +func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
    +	t.Root = nil
    +	t.lex = lex
    +	t.vars = []string{"$"}
    +	t.funcs = funcs
    +}
    +
    +// stopParse terminates parsing.
    +// It releases the parse-only state so a finished Tree holds no lexer.
    +func (t *Tree) stopParse() {
    +	t.lex = nil
    +	t.vars = nil
    +	t.funcs = nil
    +}
    +
    +// Parse parses the template definition string to construct a representation of
    +// the template for execution. If either action delimiter string is empty, the
    +// default ("{{" or "}}") is used. Embedded template definitions are added to
    +// the treeSet map.
    +func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
    +	defer t.recover(&err)
    +	t.ParseName = t.Name
    +	t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
    +	t.text = text
    +	t.parse(treeSet)
    +	t.add(treeSet)
    +	t.stopParse()
    +	return t, nil
    +}
    +
    +// add adds tree to the treeSet.
    +// An empty previous definition may be overwritten; redefining a non-empty
    +// template with another non-empty body is an error. (The "template:" in the
    +// message is doubled by errorf's own prefix — an upstream quirk.)
    +func (t *Tree) add(treeSet map[string]*Tree) {
    +	tree := treeSet[t.Name]
    +	if tree == nil || IsEmptyTree(tree.Root) {
    +		treeSet[t.Name] = t
    +		return
    +	}
    +	if !IsEmptyTree(t.Root) {
    +		t.errorf("template: multiple definition of template %q", t.Name)
    +	}
    +}
    +
    +// IsEmptyTree reports whether this tree (node) is empty of everything but space.
    +// The empty cases (Action, If, Range, Template, With) deliberately do nothing
    +// and fall past the switch to "return false": those node kinds always count
    +// as non-empty content.
    +func IsEmptyTree(n Node) bool {
    +	switch n := n.(type) {
    +	case nil:
    +		return true
    +	case *ActionNode:
    +	case *IfNode:
    +	case *ListNode:
    +		for _, node := range n.Nodes {
    +			if !IsEmptyTree(node) {
    +				return false
    +			}
    +		}
    +		return true
    +	case *RangeNode:
    +	case *TemplateNode:
    +	case *TextNode:
    +		return len(bytes.TrimSpace(n.Text)) == 0
    +	case *WithNode:
    +	default:
    +		panic("unknown node: " + n.String())
    +	}
    +	return false
    +}
    +
    +// parse is the top-level parser for a template, essentially the same
    +// as itemList except it also parses {{define}} actions.
    +// It runs to EOF.
    +func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
    +	t.Root = t.newList(t.peek().pos)
    +	for t.peek().typ != itemEOF {
    +		if t.peek().typ == itemLeftDelim {
    +			delim := t.next()
    +			if t.nextNonSpace().typ == itemDefine {
    +				// A nested {{define}} is parsed into its own Tree that
    +				// shares this parser's lexer and func maps.
    +				newT := New("definition") // name will be updated once we know it.
    +				newT.text = t.text
    +				newT.ParseName = t.ParseName
    +				newT.startParse(t.funcs, t.lex)
    +				newT.parseDefinition(treeSet)
    +				continue
    +			}
    +			// Not a define: push the delimiter back and parse it as an action.
    +			t.backup2(delim)
    +		}
    +		n := t.textOrAction()
    +		if n.Type() == nodeEnd {
    +			t.errorf("unexpected %s", n)
    +		}
    +		t.Root.append(n)
    +	}
    +	return nil
    +}
    +
    +// parseDefinition parses a {{define}} ...  {{end}} template definition and
    +// installs the definition in the treeSet map.  The "define" keyword has already
    +// been scanned.
    +// The template name may be a quoted or raw string; it is unquoted here.
    +func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
    +	const context = "define clause"
    +	name := t.expectOneOf(itemString, itemRawString, context)
    +	var err error
    +	t.Name, err = strconv.Unquote(name.val)
    +	if err != nil {
    +		t.error(err)
    +	}
    +	t.expect(itemRightDelim, context)
    +	var end Node
    +	t.Root, end = t.itemList()
    +	if end.Type() != nodeEnd {
    +		t.errorf("unexpected %s in %s", end, context)
    +	}
    +	t.add(treeSet)
    +	t.stopParse()
    +}
    +
    +// itemList:
    +//	textOrAction*
    +// Terminates at {{end}} or {{else}}, returned separately.
    +// Reaching EOF without a terminator is an error.
    +func (t *Tree) itemList() (list *ListNode, next Node) {
    +	list = t.newList(t.peekNonSpace().pos)
    +	for t.peekNonSpace().typ != itemEOF {
    +		n := t.textOrAction()
    +		switch n.Type() {
    +		case nodeEnd, nodeElse:
    +			return list, n
    +		}
    +		list.append(n)
    +	}
    +	t.errorf("unexpected EOF")
    +	return
    +}
    +
    +// textOrAction:
    +//	text | action
    +// An itemElideNewline token triggers stripping of the newline(s) that follow
    +// the right delimiter (the "\\" escape handled by elideNewline).
    +func (t *Tree) textOrAction() Node {
    +	switch token := t.nextNonSpace(); token.typ {
    +	case itemElideNewline:
    +		return t.elideNewline()
    +	case itemText:
    +		return t.newText(token.pos, token.val)
    +	case itemLeftDelim:
    +		return t.action()
    +	default:
    +		t.unexpected(token, "input")
    +	}
    +	return nil
    +}
    +
    +// elideNewline:
    +// Remove newlines trailing rightDelim if \\ is present.
    +func (t *Tree) elideNewline() Node {
    +	token := t.peek()
    +	if token.typ != itemText {
    +		t.unexpected(token, "input")
    +		return nil
    +	}
    +
    +	t.next()
    +	stripped := strings.TrimLeft(token.val, "\n\r")
    +	diff := len(token.val) - len(stripped)
    +	if diff > 0 {
    +		// This is a bit nasty. We mutate the token in-place to remove
    +		// preceding newlines. (token is a value copy, so only this local
    +		// copy — used to build the TextNode below — is affected.)
    +		token.pos += Pos(diff)
    +		token.val = stripped
    +	}
    +	return t.newText(token.pos, token.val)
    +}
    +
    +// Action:
    +//	control
    +//	command ("|" command)*
    +// Left delim is past. Now get actions.
    +// First word could be a keyword such as range.
    +// Anything that is not a recognized keyword is pushed back and parsed as a
    +// plain pipeline action.
    +func (t *Tree) action() (n Node) {
    +	switch token := t.nextNonSpace(); token.typ {
    +	case itemElse:
    +		return t.elseControl()
    +	case itemEnd:
    +		return t.endControl()
    +	case itemIf:
    +		return t.ifControl()
    +	case itemRange:
    +		return t.rangeControl()
    +	case itemTemplate:
    +		return t.templateControl()
    +	case itemWith:
    +		return t.withControl()
    +	}
    +	t.backup()
    +	// Do not pop variables; they persist until "end".
    +	return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
    +}
    +
    +// Pipeline:
    +//	declarations? command ('|' command)*
    +func (t *Tree) pipeline(context string) (pipe *PipeNode) {
    +	var decl []*VariableNode
    +	pos := t.peekNonSpace().pos
    +	// Are there declarations?
    +	// The single-pass "for { ... break }" shape exists so the comma case can
    +	// "continue" to scan a second declared variable ("range $i, $v :=").
    +	for {
    +		if v := t.peekNonSpace(); v.typ == itemVariable {
    +			t.next()
    +			// Since space is a token, we need 3-token look-ahead here in the worst case:
    +			// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
    +			// argument variable rather than a declaration. So remember the token
    +			// adjacent to the variable so we can push it back if necessary.
    +			tokenAfterVariable := t.peek()
    +			if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
    +				t.nextNonSpace()
    +				variable := t.newVariable(v.pos, v.val)
    +				decl = append(decl, variable)
    +				t.vars = append(t.vars, v.val)
    +				if next.typ == itemChar && next.val == "," {
    +					if context == "range" && len(decl) < 2 {
    +						continue
    +					}
    +					t.errorf("too many declarations in %s", context)
    +				}
    +			} else if tokenAfterVariable.typ == itemSpace {
    +				t.backup3(v, tokenAfterVariable)
    +			} else {
    +				t.backup2(v)
    +			}
    +		}
    +		break
    +	}
    +	pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
    +	// Collect the '|'-separated commands until the closing delimiter or ')'.
    +	for {
    +		switch token := t.nextNonSpace(); token.typ {
    +		case itemRightDelim, itemRightParen:
    +			if len(pipe.Cmds) == 0 {
    +				t.errorf("missing value for %s", context)
    +			}
    +			if token.typ == itemRightParen {
    +				t.backup()
    +			}
    +			return
    +		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
    +			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
    +			t.backup()
    +			pipe.append(t.command())
    +		default:
    +			t.unexpected(token, context)
    +		}
    +	}
    +}
    +
    +// parseControl parses the body shared by if/range/with: pipeline, item list,
    +// optional {{else}} list, terminating {{end}}. The deferred popVars captures
    +// len(t.vars) now, so variables declared inside the control are presumably
    +// discarded on exit (popVars is defined elsewhere — confirm).
    +func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
    +	defer t.popVars(len(t.vars))
    +	line = t.lex.lineNumber()
    +	pipe = t.pipeline(context)
    +	var next Node
    +	list, next = t.itemList()
    +	switch next.Type() {
    +	case nodeEnd: //done
    +	case nodeElse:
    +		if allowElseIf {
    +			// Special case for "else if". If the "else" is followed immediately by an "if",
    +			// the elseControl will have left the "if" token pending. Treat
    +			//	{{if a}}_{{else if b}}_{{end}}
    +			// as
    +			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
    +			// To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}}
    +			// is assumed. This technique works even for long if-else-if chains.
    +			// TODO: Should we allow else-if in with and range?
    +			if t.peek().typ == itemIf {
    +				t.next() // Consume the "if" token.
    +				elseList = t.newList(next.Position())
    +				elseList.append(t.ifControl())
    +				// Do not consume the next item - only one {{end}} required.
    +				break
    +			}
    +		}
    +		elseList, next = t.itemList()
    +		if next.Type() != nodeEnd {
    +			t.errorf("expected end; found %s", next)
    +		}
    +	}
    +	return pipe.Position(), line, pipe, list, elseList
    +}
    +
    +// If:
    +//	{{if pipeline}} itemList {{end}}
    +//	{{if pipeline}} itemList {{else}} itemList {{end}}
    +// If keyword is past. Only "if" allows the else-if special case.
    +func (t *Tree) ifControl() Node {
    +	return t.newIf(t.parseControl(true, "if"))
    +}
    +
    +// Range:
    +//	{{range pipeline}} itemList {{end}}
    +//	{{range pipeline}} itemList {{else}} itemList {{end}}
    +// Range keyword is past.
    +func (t *Tree) rangeControl() Node {
    +	return t.newRange(t.parseControl(false, "range"))
    +}
    +
    +// With:
    +//	{{with pipeline}} itemList {{end}}
    +//	{{with pipeline}} itemList {{else}} itemList {{end}}
    +// If keyword is past.
    +func (t *Tree) withControl() Node {
    +	return t.newWith(t.parseControl(false, "with"))
    +}
    +
    +// End:
    +//	{{end}}
    +// End keyword is past.
    +func (t *Tree) endControl() Node {
    +	return t.newEnd(t.expect(itemRightDelim, "end").pos)
    +}
    +
    +// Else:
    +//	{{else}}
    +// Else keyword is past.
    +func (t *Tree) elseControl() Node {
    +	// Special case for "else if".
    +	peek := t.peekNonSpace()
    +	if peek.typ == itemIf {
    +		// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
    +		// The "if" token is deliberately NOT consumed; parseControl picks it up.
    +		return t.newElse(peek.pos, t.lex.lineNumber())
    +	}
    +	return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
    +}
    +
    +// Template:
    +//	{{template stringValue pipeline}}
    +// Template keyword is past.  The name must be something that can evaluate
    +// to a string.
    +// The pipeline is optional; absent, the TemplateNode's Pipe is nil.
    +func (t *Tree) templateControl() Node {
    +	var name string
    +	token := t.nextNonSpace()
    +	switch token.typ {
    +	case itemString, itemRawString:
    +		s, err := strconv.Unquote(token.val)
    +		if err != nil {
    +			t.error(err)
    +		}
    +		name = s
    +	default:
    +		t.unexpected(token, "template invocation")
    +	}
    +	var pipe *PipeNode
    +	if t.nextNonSpace().typ != itemRightDelim {
    +		t.backup()
    +		// Do not pop variables; they persist until "end".
    +		pipe = t.pipeline("template")
    +	}
    +	return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
    +}
    +
    +// command:
    +//	operand (space operand)*
    +// space-separated arguments up to a pipeline character or right delimiter.
    +// we consume the pipe character but leave the right delim to terminate the action.
    +func (t *Tree) command() *CommandNode {
    +	cmd := t.newCommand(t.peekNonSpace().pos)
    +	for {
    +		t.peekNonSpace() // skip leading spaces.
    +		operand := t.operand()
    +		if operand != nil {
    +			cmd.append(operand)
    +		}
    +		switch token := t.next(); token.typ {
    +		case itemSpace:
    +			continue
    +		case itemError:
    +			t.errorf("%s", token.val)
    +		case itemRightDelim, itemRightParen:
    +			t.backup()
    +		case itemPipe:
    +		default:
    +			t.errorf("unexpected %s in operand; missing space?", token)
    +		}
    +		break
    +	}
    +	if len(cmd.Args) == 0 {
    +		t.errorf("empty command")
    +	}
    +	return cmd
    +}
    +
    +// operand:
    +//	term .Field*
    +// An operand is a space-separated component of a command,
    +// a term possibly followed by field accesses.
    +// A nil return means the next item is not an operand.
    +func (t *Tree) operand() Node {
    +	node := t.term()
    +	if node == nil {
    +		return nil
    +	}
    +	if t.peek().typ == itemField {
    +		chain := t.newChain(t.peek().pos, node)
    +		for t.peek().typ == itemField {
    +			chain.Add(t.next().val)
    +		}
    +		// Compatibility with original API: If the term is of type NodeField
    +		// or NodeVariable, just put more fields on the original.
    +		// Otherwise, keep the Chain node.
    +		// TODO: Switch to Chains always when we can.
    +		switch node.Type() {
    +		case NodeField:
    +			node = t.newField(chain.Position(), chain.String())
    +		case NodeVariable:
    +			node = t.newVariable(chain.Position(), chain.String())
    +		default:
    +			node = chain
    +		}
    +	}
    +	return node
    +}
    +
    +// term:
    +//	literal (number, string, nil, boolean)
    +//	function (identifier)
    +//	.
    +//	.Field
    +//	$
    +//	'(' pipeline ')'
    +// A term is a simple "expression".
    +// A nil return means the next item is not a term.
    +func (t *Tree) term() Node {
    +	switch token := t.nextNonSpace(); token.typ {
    +	case itemError:
    +		t.errorf("%s", token.val)
    +	case itemIdentifier:
    +		if !t.hasFunction(token.val) {
    +			t.errorf("function %q not defined", token.val)
    +		}
    +		return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
    +	case itemDot:
    +		return t.newDot(token.pos)
    +	case itemNil:
    +		return t.newNil(token.pos)
    +	case itemVariable:
    +		return t.useVar(token.pos, token.val)
    +	case itemField:
    +		return t.newField(token.pos, token.val)
    +	case itemBool:
    +		return t.newBool(token.pos, token.val == "true")
    +	case itemCharConstant, itemComplex, itemNumber:
    +		number, err := t.newNumber(token.pos, token.val, token.typ)
    +		if err != nil {
    +			t.error(err)
    +		}
    +		return number
    +	case itemLeftParen:
    +		pipe := t.pipeline("parenthesized pipeline")
    +		if token := t.next(); token.typ != itemRightParen {
    +			t.errorf("unclosed right paren: unexpected %s", token)
    +		}
    +		return pipe
    +	case itemString, itemRawString:
    +		s, err := strconv.Unquote(token.val)
    +		if err != nil {
    +			t.error(err)
    +		}
    +		return t.newString(token.pos, token.val, s)
    +	}
    +	t.backup()
    +	return nil
    +}
    +
    +// hasFunction reports if a function name exists in the Tree's maps.
    +func (t *Tree) hasFunction(name string) bool {
    +	for _, funcMap := range t.funcs {
    +		if funcMap == nil {
    +			continue
    +		}
    +		if funcMap[name] != nil {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// popVars trims the variable list to the specified length
    +func (t *Tree) popVars(n int) {
    +	t.vars = t.vars[:n]
    +}
    +
    +// useVar returns a node for a variable reference. It errors if the
    +// variable is not defined.
    +func (t *Tree) useVar(pos Pos, name string) Node {
    +	v := t.newVariable(pos, name)
    +	for _, varName := range t.vars {
    +		if varName == v.Ident[0] {
    +			return v
    +		}
    +	}
    +	t.errorf("undefined variable %q", v.Ident[0])
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/template/template.go b/src/prometheus/vendor/github.com/alecthomas/template/template.go
    new file mode 100644
    index 0000000..447ed2a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/template/template.go
    @@ -0,0 +1,218 @@
    +// Copyright 2011 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +package template
    +
    +import (
    +	"fmt"
    +	"reflect"
    +
    +	"github.com/alecthomas/template/parse"
    +)
    +
    +// common holds the information shared by related templates.
    +type common struct {
    +	tmpl map[string]*Template
    +	// We use two maps, one for parsing and one for execution.
    +	// This separation makes the API cleaner since it doesn't
    +	// expose reflection to the client.
    +	parseFuncs FuncMap
    +	execFuncs  map[string]reflect.Value
    +}
    +
    +// Template is the representation of a parsed template. The *parse.Tree
    +// field is exported only for use by html/template and should be treated
    +// as unexported by all other clients.
    +type Template struct {
    +	name string
    +	*parse.Tree
    +	*common
    +	leftDelim  string
    +	rightDelim string
    +}
    +
    +// New allocates a new template with the given name.
    +func New(name string) *Template {
    +	return &Template{
    +		name: name,
    +	}
    +}
    +
    +// Name returns the name of the template.
    +func (t *Template) Name() string {
    +	return t.name
    +}
    +
    +// New allocates a new template associated with the given one and with the same
    +// delimiters. The association, which is transitive, allows one template to
    +// invoke another with a {{template}} action.
    +func (t *Template) New(name string) *Template {
    +	t.init()
    +	return &Template{
    +		name:       name,
    +		common:     t.common,
    +		leftDelim:  t.leftDelim,
    +		rightDelim: t.rightDelim,
    +	}
    +}
    +
    +func (t *Template) init() {
    +	if t.common == nil {
    +		t.common = new(common)
    +		t.tmpl = make(map[string]*Template)
    +		t.parseFuncs = make(FuncMap)
    +		t.execFuncs = make(map[string]reflect.Value)
    +	}
    +}
    +
    +// Clone returns a duplicate of the template, including all associated
    +// templates. The actual representation is not copied, but the name space of
    +// associated templates is, so further calls to Parse in the copy will add
    +// templates to the copy but not to the original. Clone can be used to prepare
    +// common templates and use them with variant definitions for other templates
    +// by adding the variants after the clone is made.
    +func (t *Template) Clone() (*Template, error) {
    +	nt := t.copy(nil)
    +	nt.init()
    +	nt.tmpl[t.name] = nt
    +	for k, v := range t.tmpl {
    +		if k == t.name { // Already installed.
    +			continue
    +		}
    +		// The associated templates share nt's common structure.
    +		tmpl := v.copy(nt.common)
    +		nt.tmpl[k] = tmpl
    +	}
    +	for k, v := range t.parseFuncs {
    +		nt.parseFuncs[k] = v
    +	}
    +	for k, v := range t.execFuncs {
    +		nt.execFuncs[k] = v
    +	}
    +	return nt, nil
    +}
    +
    +// copy returns a shallow copy of t, with common set to the argument.
    +func (t *Template) copy(c *common) *Template {
    +	nt := New(t.name)
    +	nt.Tree = t.Tree
    +	nt.common = c
    +	nt.leftDelim = t.leftDelim
    +	nt.rightDelim = t.rightDelim
    +	return nt
    +}
    +
    +// AddParseTree creates a new template with the name and parse tree
    +// and associates it with t.
    +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
    +	if t.common != nil && t.tmpl[name] != nil {
    +		return nil, fmt.Errorf("template: redefinition of template %q", name)
    +	}
    +	nt := t.New(name)
    +	nt.Tree = tree
    +	t.tmpl[name] = nt
    +	return nt, nil
    +}
    +
    +// Templates returns a slice of the templates associated with t, including t
    +// itself.
    +func (t *Template) Templates() []*Template {
    +	if t.common == nil {
    +		return nil
    +	}
    +	// Return a slice so we don't expose the map.
    +	m := make([]*Template, 0, len(t.tmpl))
    +	for _, v := range t.tmpl {
    +		m = append(m, v)
    +	}
    +	return m
    +}
    +
    +// Delims sets the action delimiters to the specified strings, to be used in
    +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
    +// definitions will inherit the settings. An empty delimiter stands for the
    +// corresponding default: {{ or }}.
    +// The return value is the template, so calls can be chained.
    +func (t *Template) Delims(left, right string) *Template {
    +	t.leftDelim = left
    +	t.rightDelim = right
    +	return t
    +}
    +
    +// Funcs adds the elements of the argument map to the template's function map.
    +// It panics if a value in the map is not a function with appropriate return
    +// type. However, it is legal to overwrite elements of the map. The return
    +// value is the template, so calls can be chained.
    +func (t *Template) Funcs(funcMap FuncMap) *Template {
    +	t.init()
    +	addValueFuncs(t.execFuncs, funcMap)
    +	addFuncs(t.parseFuncs, funcMap)
    +	return t
    +}
    +
    +// Lookup returns the template with the given name that is associated with t,
    +// or nil if there is no such template.
    +func (t *Template) Lookup(name string) *Template {
    +	if t.common == nil {
    +		return nil
    +	}
    +	return t.tmpl[name]
    +}
    +
    +// Parse parses a string into a template. Nested template definitions will be
    +// associated with the top-level template t. Parse may be called multiple times
    +// to parse definitions of templates to associate with t. It is an error if a
    +// resulting template is non-empty (contains content other than template
    +// definitions) and would replace a non-empty template with the same name.
    +// (In multiple calls to Parse with the same receiver template, only one call
    +// can contain text other than space, comments, and template definitions.)
    +func (t *Template) Parse(text string) (*Template, error) {
    +	t.init()
    +	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
    +	if err != nil {
    +		return nil, err
    +	}
    +	// Add the newly parsed trees, including the one for t, into our common structure.
    +	for name, tree := range trees {
    +		// If the name we parsed is the name of this template, overwrite this template.
    +		// The associate method checks it's not a redefinition.
    +		tmpl := t
    +		if name != t.name {
    +			tmpl = t.New(name)
    +		}
    +		// Even if t == tmpl, we need to install it in the common.tmpl map.
    +		if replace, err := t.associate(tmpl, tree); err != nil {
    +			return nil, err
    +		} else if replace {
    +			tmpl.Tree = tree
    +		}
    +		tmpl.leftDelim = t.leftDelim
    +		tmpl.rightDelim = t.rightDelim
    +	}
    +	return t, nil
    +}
    +
    +// associate installs the new template into the group of templates associated
    +// with t. It is an error to reuse a name except to overwrite an empty
    +// template. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
    +func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
    +	if new.common != t.common {
    +		panic("internal error: associate not common")
    +	}
    +	name := new.name
    +	if old := t.tmpl[name]; old != nil {
    +		oldIsEmpty := parse.IsEmptyTree(old.Root)
    +		newIsEmpty := parse.IsEmptyTree(tree.Root)
    +		if newIsEmpty {
    +			// Whether old is empty or not, new is empty; no reason to replace old.
    +			return false, nil
    +		}
    +		if !oldIsEmpty {
    +			return false, fmt.Errorf("template: redefinition of template %q", name)
    +		}
    +	}
    +	t.tmpl[name] = new
    +	return true, nil
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/units/COPYING b/src/prometheus/vendor/github.com/alecthomas/units/COPYING
    new file mode 100644
    index 0000000..2993ec0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/units/COPYING
    @@ -0,0 +1,19 @@
    +Copyright (C) 2014 Alec Thomas
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy of
    +this software and associated documentation files (the "Software"), to deal in
    +the Software without restriction, including without limitation the rights to
    +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
    +of the Software, and to permit persons to whom the Software is furnished to do
    +so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    diff --git a/src/prometheus/vendor/github.com/alecthomas/units/README.md b/src/prometheus/vendor/github.com/alecthomas/units/README.md
    new file mode 100644
    index 0000000..bee884e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/units/README.md
    @@ -0,0 +1,11 @@
    +# Units - Helpful unit multipliers and functions for Go
    +
    +The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.
    +
    +It allows for code like this:
    +
    +```go
+n, err := units.ParseBase2Bytes("1KB")
    +// n == 1024
    +n = units.Mebibyte * 512
    +```
    diff --git a/src/prometheus/vendor/github.com/alecthomas/units/bytes.go b/src/prometheus/vendor/github.com/alecthomas/units/bytes.go
    new file mode 100644
    index 0000000..eaadeb8
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/units/bytes.go
    @@ -0,0 +1,83 @@
    +package units
    +
    +// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
    +// etc.).
    +type Base2Bytes int64
    +
    +// Base-2 byte units.
    +const (
    +	Kibibyte Base2Bytes = 1024
    +	KiB                 = Kibibyte
    +	Mebibyte            = Kibibyte * 1024
    +	MiB                 = Mebibyte
    +	Gibibyte            = Mebibyte * 1024
    +	GiB                 = Gibibyte
    +	Tebibyte            = Gibibyte * 1024
    +	TiB                 = Tebibyte
    +	Pebibyte            = Tebibyte * 1024
    +	PiB                 = Pebibyte
    +	Exbibyte            = Pebibyte * 1024
    +	EiB                 = Exbibyte
    +)
    +
    +var (
    +	bytesUnitMap    = MakeUnitMap("iB", "B", 1024)
    +	oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
    +)
    +
    +// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
    +// and KiB are both 1024.
    +func ParseBase2Bytes(s string) (Base2Bytes, error) {
    +	n, err := ParseUnit(s, bytesUnitMap)
    +	if err != nil {
    +		n, err = ParseUnit(s, oldBytesUnitMap)
    +	}
    +	return Base2Bytes(n), err
    +}
    +
    +func (b Base2Bytes) String() string {
    +	return ToString(int64(b), 1024, "iB", "B")
    +}
    +
    +var (
    +	metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
    +)
    +
    +// MetricBytes are SI byte units (1000 bytes in a kilobyte).
    +type MetricBytes SI
    +
    +// SI base-10 byte units.
    +const (
    +	Kilobyte MetricBytes = 1000
    +	KB                   = Kilobyte
    +	Megabyte             = Kilobyte * 1000
    +	MB                   = Megabyte
    +	Gigabyte             = Megabyte * 1000
    +	GB                   = Gigabyte
    +	Terabyte             = Gigabyte * 1000
    +	TB                   = Terabyte
    +	Petabyte             = Terabyte * 1000
    +	PB                   = Petabyte
    +	Exabyte              = Petabyte * 1000
    +	EB                   = Exabyte
    +)
    +
    +// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
    +func ParseMetricBytes(s string) (MetricBytes, error) {
    +	n, err := ParseUnit(s, metricBytesUnitMap)
    +	return MetricBytes(n), err
    +}
    +
    +func (m MetricBytes) String() string {
    +	return ToString(int64(m), 1000, "B", "B")
    +}
    +
    +// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
    +// respectively. That is, KiB represents 1024 and KB represents 1000.
    +func ParseStrictBytes(s string) (int64, error) {
    +	n, err := ParseUnit(s, bytesUnitMap)
    +	if err != nil {
    +		n, err = ParseUnit(s, metricBytesUnitMap)
    +	}
    +	return int64(n), err
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/units/doc.go b/src/prometheus/vendor/github.com/alecthomas/units/doc.go
    new file mode 100644
    index 0000000..156ae38
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/units/doc.go
    @@ -0,0 +1,13 @@
    +// Package units provides helpful unit multipliers and functions for Go.
    +//
    +// The goal of this package is to have functionality similar to the time [1] package.
    +//
    +//
    +// [1] http://golang.org/pkg/time/
    +//
    +// It allows for code like this:
    +//
    +//     n, err := ParseBase2Bytes("1KB")
    +//     // n == 1024
    +//     n = units.Mebibyte * 512
    +package units
    diff --git a/src/prometheus/vendor/github.com/alecthomas/units/si.go b/src/prometheus/vendor/github.com/alecthomas/units/si.go
    new file mode 100644
    index 0000000..8234a9d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/units/si.go
    @@ -0,0 +1,26 @@
    +package units
    +
    +// SI units.
    +type SI int64
    +
    +// SI unit multiples.
    +const (
    +	Kilo SI = 1000
    +	Mega    = Kilo * 1000
    +	Giga    = Mega * 1000
    +	Tera    = Giga * 1000
    +	Peta    = Tera * 1000
    +	Exa     = Peta * 1000
    +)
    +
    +func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
    +	return map[string]float64{
    +		shortSuffix:  1,
    +		"K" + suffix: float64(scale),
    +		"M" + suffix: float64(scale * scale),
    +		"G" + suffix: float64(scale * scale * scale),
    +		"T" + suffix: float64(scale * scale * scale * scale),
    +		"P" + suffix: float64(scale * scale * scale * scale * scale),
    +		"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/alecthomas/units/util.go b/src/prometheus/vendor/github.com/alecthomas/units/util.go
    new file mode 100644
    index 0000000..6527e92
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/alecthomas/units/util.go
    @@ -0,0 +1,138 @@
    +package units
    +
    +import (
    +	"errors"
    +	"fmt"
    +	"strings"
    +)
    +
    +var (
    +	siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
    +)
    +
    +func ToString(n int64, scale int64, suffix, baseSuffix string) string {
    +	mn := len(siUnits)
    +	out := make([]string, mn)
    +	for i, m := range siUnits {
    +		if n%scale != 0 || i == 0 && n == 0 {
    +			s := suffix
    +			if i == 0 {
    +				s = baseSuffix
    +			}
    +			out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
    +		}
    +		n /= scale
    +		if n == 0 {
    +			break
    +		}
    +	}
    +	return strings.Join(out, "")
    +}
    +
    +// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
    +var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
    +
    +// leadingInt consumes the leading [0-9]* from s.
    +func leadingInt(s string) (x int64, rem string, err error) {
    +	i := 0
    +	for ; i < len(s); i++ {
    +		c := s[i]
    +		if c < '0' || c > '9' {
    +			break
    +		}
    +		if x >= (1<<63-10)/10 {
    +			// overflow
    +			return 0, "", errLeadingInt
    +		}
    +		x = x*10 + int64(c) - '0'
    +	}
    +	return x, s[i:], nil
    +}
    +
    +func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
    +	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
    +	orig := s
    +	f := float64(0)
    +	neg := false
    +
    +	// Consume [-+]?
    +	if s != "" {
    +		c := s[0]
    +		if c == '-' || c == '+' {
    +			neg = c == '-'
    +			s = s[1:]
    +		}
    +	}
    +	// Special case: if all that is left is "0", this is zero.
    +	if s == "0" {
    +		return 0, nil
    +	}
    +	if s == "" {
    +		return 0, errors.New("units: invalid " + orig)
    +	}
    +	for s != "" {
    +		g := float64(0) // this element of the sequence
    +
    +		var x int64
    +		var err error
    +
    +		// The next character must be [0-9.]
    +		if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
    +			return 0, errors.New("units: invalid " + orig)
    +		}
    +		// Consume [0-9]*
    +		pl := len(s)
    +		x, s, err = leadingInt(s)
    +		if err != nil {
    +			return 0, errors.New("units: invalid " + orig)
    +		}
    +		g = float64(x)
    +		pre := pl != len(s) // whether we consumed anything before a period
    +
    +		// Consume (\.[0-9]*)?
    +		post := false
    +		if s != "" && s[0] == '.' {
    +			s = s[1:]
    +			pl := len(s)
    +			x, s, err = leadingInt(s)
    +			if err != nil {
    +				return 0, errors.New("units: invalid " + orig)
    +			}
    +			scale := 1.0
    +			for n := pl - len(s); n > 0; n-- {
    +				scale *= 10
    +			}
    +			g += float64(x) / scale
    +			post = pl != len(s)
    +		}
    +		if !pre && !post {
    +			// no digits (e.g. ".s" or "-.s")
    +			return 0, errors.New("units: invalid " + orig)
    +		}
    +
    +		// Consume unit.
    +		i := 0
    +		for ; i < len(s); i++ {
    +			c := s[i]
    +			if c == '.' || ('0' <= c && c <= '9') {
    +				break
    +			}
    +		}
    +		u := s[:i]
    +		s = s[i:]
    +		unit, ok := unitMap[u]
    +		if !ok {
    +			return 0, errors.New("units: unknown unit " + u + " in " + orig)
    +		}
    +
    +		f += g * unit
    +	}
    +
    +	if neg {
    +		f = -f
    +	}
    +	if f < float64(-1<<63) || f > float64(1<<63-1) {
    +		return 0, errors.New("units: overflow parsing unit")
    +	}
    +	return int64(f), nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/src/prometheus/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
    new file mode 100644
    index 0000000..d645695
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
    @@ -0,0 +1,202 @@
    +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright [yyyy] [name of copyright owner]
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/src/prometheus/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
    new file mode 100644
    index 0000000..5f14d11
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
    @@ -0,0 +1,3 @@
    +AWS SDK for Go
    +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
    +Copyright 2014-2015 Stripe, Inc.
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
    new file mode 100644
    index 0000000..56fdfc2
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
    @@ -0,0 +1,145 @@
    +// Package awserr represents API error interface accessors for the SDK.
    +package awserr
    +
    +// An Error wraps lower level errors with code, message and an original error.
    +// The underlying concrete error type may also satisfy other interfaces which
    +// can be to used to obtain more specific information about the error.
    +//
    +// Calling Error() or String() will always include the full information about
    +// an error based on its underlying type.
    +//
    +// Example:
    +//
    +//     output, err := s3manage.Upload(svc, input, opts)
    +//     if err != nil {
    +//         if awsErr, ok := err.(awserr.Error); ok {
    +//             // Get error details
    +//             log.Println("Error:", awsErr.Code(), awsErr.Message())
    +//
    +//             // Prints out full error message, including original error if there was one.
    +//             log.Println("Error:", awsErr.Error())
    +//
    +//             // Get original error
    +//             if origErr := awsErr.OrigErr(); origErr != nil {
    +//                 // operate on original error.
    +//             }
    +//         } else {
    +//             fmt.Println(err.Error())
    +//         }
    +//     }
    +//
    +type Error interface {
    +	// Satisfy the generic error interface.
    +	error
    +
    +	// Returns the short phrase depicting the classification of the error.
    +	Code() string
    +
    +	// Returns the error details message.
    +	Message() string
    +
    +	// Returns the original error if one was set.  Nil is returned if not set.
    +	OrigErr() error
    +}
    +
    +// BatchError is a batch of errors which also wraps lower level errors with
    +// code, message, and original errors. Calling Error() will include all errors
    +// that occurred in the batch.
    +//
    +// Deprecated: Replaced with BatchedErrors. Only defined for backwards
    +// compatibility.
    +type BatchError interface {
    +	// Satisfy the generic error interface.
    +	error
    +
    +	// Returns the short phrase depicting the classification of the error.
    +	Code() string
    +
    +	// Returns the error details message.
    +	Message() string
    +
    +	// Returns the original error if one was set.  Nil is returned if not set.
    +	OrigErrs() []error
    +}
    +
    +// BatchedErrors is a batch of errors which also wraps lower level errors with
    +// code, message, and original errors. Calling Error() will include all errors
    +// that occurred in the batch.
    +//
    +// Replaces BatchError
    +type BatchedErrors interface {
    +	// Satisfy the base Error interface.
    +	Error
    +
    +	// Returns the original error if one was set.  Nil is returned if not set.
    +	OrigErrs() []error
    +}
    +
    +// New returns an Error object described by the code, message, and origErr.
    +//
    +// If origErr satisfies the Error interface it will not be wrapped within a new
    +// Error object and will instead be returned.
    +func New(code, message string, origErr error) Error {
    +	var errs []error
    +	if origErr != nil {
    +		errs = append(errs, origErr)
    +	}
    +	return newBaseError(code, message, errs)
    +}
    +
    +// NewBatchError returns an BatchedErrors with a collection of errors as an
    +// array of errors.
    +func NewBatchError(code, message string, errs []error) BatchedErrors {
    +	return newBaseError(code, message, errs)
    +}
    +
    +// A RequestFailure is an interface to extract request failure information from
    +// an Error such as the request ID of the failed request returned by a service.
    +// RequestFailures may not always have a requestID value if the request failed
    +// prior to reaching the service such as a connection error.
    +//
    +// Example:
    +//
    +//     output, err := s3manage.Upload(svc, input, opts)
    +//     if err != nil {
    +//         if reqerr, ok := err.(RequestFailure); ok {
    +//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
    +//         } else {
    +//             log.Println("Error:", err.Error())
    +//         }
    +//     }
    +//
    +// Combined with awserr.Error:
    +//
    +//    output, err := s3manage.Upload(svc, input, opts)
    +//    if err != nil {
    +//        if awsErr, ok := err.(awserr.Error); ok {
    +//            // Generic AWS Error with Code, Message, and original error (if any)
    +//            fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
    +//
    +//            if reqErr, ok := err.(awserr.RequestFailure); ok {
    +//                // A service error occurred
    +//                fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
    +//            }
    +//        } else {
    +//            fmt.Println(err.Error())
    +//        }
    +//    }
    +//
    +type RequestFailure interface {
    +	Error
    +
    +	// The status code of the HTTP response.
    +	StatusCode() int
    +
    +	// The request ID returned by the service for a request failure. This will
    +	// be empty if no request ID is available such as the request failed due
    +	// to a connection error.
    +	RequestID() string
    +}
    +
    +// NewRequestFailure returns a new request error wrapper for the given Error
    +// provided.
    +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
    +	return newRequestError(err, statusCode, reqID)
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
    new file mode 100644
    index 0000000..0202a00
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
    @@ -0,0 +1,194 @@
    +package awserr
    +
    +import "fmt"
    +
    +// SprintError returns a string of the formatted error code.
    +//
    +// Both extra and origErr are optional.  If they are included their lines
    +// will be added, but if they are not included their lines will be ignored.
    +func SprintError(code, message, extra string, origErr error) string {
    +	msg := fmt.Sprintf("%s: %s", code, message)
    +	if extra != "" {
    +		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
    +	}
    +	if origErr != nil {
    +		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
    +	}
    +	return msg
    +}
    +
    +// A baseError wraps the code and message which defines an error. It also
    +// can be used to wrap an original error object.
    +//
    +// Should be used as the root for errors satisfying the awserr.Error. Also
    +// for any error which does not fit into a specific error wrapper type.
    +type baseError struct {
    +	// Classification of error
    +	code string
    +
    +	// Detailed information about error
    +	message string
    +
    +	// Optional original error this error is based off of. Allows building
    +	// chained errors.
    +	errs []error
    +}
    +
    +// newBaseError returns an error object for the code, message, and errors.
    +//
    +// code is a short no whitespace phrase depicting the classification of
    +// the error that is being created.
    +//
    +// message is the free flow string containing detailed information about the
    +// error.
    +//
    +// origErrs is the error objects which will be nested under the new errors to
    +// be returned.
    +func newBaseError(code, message string, origErrs []error) *baseError {
    +	b := &baseError{
    +		code:    code,
    +		message: message,
    +		errs:    origErrs,
    +	}
    +
    +	return b
    +}
    +
    +// Error returns the string representation of the error.
    +//
    +// See ErrorWithExtra for formatting.
    +//
    +// Satisfies the error interface.
    +func (b baseError) Error() string {
    +	size := len(b.errs)
    +	if size > 0 {
    +		return SprintError(b.code, b.message, "", errorList(b.errs))
    +	}
    +
    +	return SprintError(b.code, b.message, "", nil)
    +}
    +
    +// String returns the string representation of the error.
    +// Alias for Error to satisfy the stringer interface.
    +func (b baseError) String() string {
    +	return b.Error()
    +}
    +
    +// Code returns the short phrase depicting the classification of the error.
    +func (b baseError) Code() string {
    +	return b.code
    +}
    +
    +// Message returns the error details message.
    +func (b baseError) Message() string {
    +	return b.message
    +}
    +
    +// OrigErr returns the original error if one was set. Nil is returned if no
    +// error was set. This only returns the first element in the list. If the full
    +// list is needed, use BatchedErrors.
    +func (b baseError) OrigErr() error {
    +	switch len(b.errs) {
    +	case 0:
    +		return nil
    +	case 1:
    +		return b.errs[0]
    +	default:
    +		if err, ok := b.errs[0].(Error); ok {
    +			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
    +		}
    +		return NewBatchError("BatchedErrors",
    +			"multiple errors occurred", b.errs)
    +	}
    +}
    +
    +// OrigErrs returns the original errors if one was set. An empty slice is
    +// returned if no error was set.
    +func (b baseError) OrigErrs() []error {
    +	return b.errs
    +}
    +
    +// So that the Error interface type can be included as an anonymous field
    +// in the requestError struct and not conflict with the error.Error() method.
    +type awsError Error
    +
    +// A requestError wraps a request or service error.
    +//
    +// Composed of baseError for code, message, and original error.
    +type requestError struct {
    +	awsError
    +	statusCode int
    +	requestID  string
    +}
    +
    +// newRequestError returns a wrapped error with additional information for
    +// request status code, and service requestID.
    +//
    +// Should be used to wrap all request which involve service requests. Even if
    +// the request failed without a service response, but had an HTTP status code
    +// that may be meaningful.
    +//
    +// Also wraps original errors via the baseError.
    +func newRequestError(err Error, statusCode int, requestID string) *requestError {
    +	return &requestError{
    +		awsError:   err,
    +		statusCode: statusCode,
    +		requestID:  requestID,
    +	}
    +}
    +
    +// Error returns the string representation of the error.
    +// Satisfies the error interface.
    +func (r requestError) Error() string {
    +	extra := fmt.Sprintf("status code: %d, request id: %s",
    +		r.statusCode, r.requestID)
    +	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
    +}
    +
    +// String returns the string representation of the error.
    +// Alias for Error to satisfy the stringer interface.
    +func (r requestError) String() string {
    +	return r.Error()
    +}
    +
    +// StatusCode returns the wrapped status code for the error
    +func (r requestError) StatusCode() int {
    +	return r.statusCode
    +}
    +
    +// RequestID returns the wrapped requestID
    +func (r requestError) RequestID() string {
    +	return r.requestID
    +}
    +
    +// OrigErrs returns the original errors if one was set. An empty slice is
    +// returned if no error was set.
    +func (r requestError) OrigErrs() []error {
    +	if b, ok := r.awsError.(BatchedErrors); ok {
    +		return b.OrigErrs()
    +	}
    +	return []error{r.OrigErr()}
    +}
    +
    +// An error list that satisfies the golang interface
    +type errorList []error
    +
    +// Error returns the string representation of the error.
    +//
    +// Satisfies the error interface.
    +func (e errorList) Error() string {
    +	msg := ""
    +	// How do we want to handle the array size being zero
    +	if size := len(e); size > 0 {
    +		for i := 0; i < size; i++ {
    +			msg += fmt.Sprintf("%s", e[i].Error())
    +			// We check the next index to see if it is within the slice.
    +			// If it is, then we append a newline. We do this, because unit tests
    +			// could be broken with the additional '\n'
    +			if i+1 < size {
    +				msg += "\n"
    +			}
    +		}
    +	}
    +	return msg
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
    new file mode 100644
    index 0000000..1a3d106
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
    @@ -0,0 +1,108 @@
    +package awsutil
    +
    +import (
    +	"io"
    +	"reflect"
    +	"time"
    +)
    +
    +// Copy deeply copies a src structure to dst. Useful for copying request and
    +// response structures.
    +//
    +// Can copy between structs of different type, but will only copy fields which
    +// are assignable, and exist in both structs. Fields which are not assignable,
    +// or do not exist in both structs are ignored.
    +func Copy(dst, src interface{}) {
    +	dstval := reflect.ValueOf(dst)
    +	if !dstval.IsValid() {
    +		panic("Copy dst cannot be nil")
    +	}
    +
    +	rcopy(dstval, reflect.ValueOf(src), true)
    +}
    +
    +// CopyOf returns a copy of src while also allocating the memory for dst.
    +// src must be a pointer type or this operation will fail.
    +func CopyOf(src interface{}) (dst interface{}) {
    +	dsti := reflect.New(reflect.TypeOf(src).Elem())
    +	dst = dsti.Interface()
    +	rcopy(dsti, reflect.ValueOf(src), true)
    +	return
    +}
    +
    +// rcopy performs a recursive copy of values from the source to destination.
    +//
    +// root is used to skip certain aspects of the copy which are not valid
    +// for the root node of a object.
    +func rcopy(dst, src reflect.Value, root bool) {
    +	if !src.IsValid() {
    +		return
    +	}
    +
    +	switch src.Kind() {
    +	case reflect.Ptr:
    +		if _, ok := src.Interface().(io.Reader); ok {
    +			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
    +				dst.Elem().Set(src)
    +			} else if dst.CanSet() {
    +				dst.Set(src)
    +			}
    +		} else {
    +			e := src.Type().Elem()
    +			if dst.CanSet() && !src.IsNil() {
    +				if _, ok := src.Interface().(*time.Time); !ok {
    +					dst.Set(reflect.New(e))
    +				} else {
    +					tempValue := reflect.New(e)
    +					tempValue.Elem().Set(src.Elem())
    +					// Sets time.Time's unexported values
    +					dst.Set(tempValue)
    +				}
    +			}
    +			if src.Elem().IsValid() {
    +				// Keep the current root state since the depth hasn't changed
    +				rcopy(dst.Elem(), src.Elem(), root)
    +			}
    +		}
    +	case reflect.Struct:
    +		t := dst.Type()
    +		for i := 0; i < t.NumField(); i++ {
    +			name := t.Field(i).Name
    +			srcVal := src.FieldByName(name)
    +			dstVal := dst.FieldByName(name)
    +			if srcVal.IsValid() && dstVal.CanSet() {
    +				rcopy(dstVal, srcVal, false)
    +			}
    +		}
    +	case reflect.Slice:
    +		if src.IsNil() {
    +			break
    +		}
    +
    +		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
    +		dst.Set(s)
    +		for i := 0; i < src.Len(); i++ {
    +			rcopy(dst.Index(i), src.Index(i), false)
    +		}
    +	case reflect.Map:
    +		if src.IsNil() {
    +			break
    +		}
    +
    +		s := reflect.MakeMap(src.Type())
    +		dst.Set(s)
    +		for _, k := range src.MapKeys() {
    +			v := src.MapIndex(k)
    +			v2 := reflect.New(v.Type()).Elem()
    +			rcopy(v2, v, false)
    +			dst.SetMapIndex(k, v2)
    +		}
    +	default:
    +		// Assign the value if possible. If its not assignable, the value would
    +		// need to be converted and the impact of that may be unexpected, or is
    +		// not compatible with the dst type.
    +		if src.Type().AssignableTo(dst.Type()) {
    +			dst.Set(src)
    +		}
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
    new file mode 100644
    index 0000000..59fa4a5
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
    @@ -0,0 +1,27 @@
    +package awsutil
    +
    +import (
    +	"reflect"
    +)
    +
    +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
    +// In addition to this, this method will also dereference the input values if
    +// possible so the DeepEqual performed will not fail if one parameter is a
    +// pointer and the other is not.
    +//
    +// DeepEqual will not perform indirection of nested values of the input parameters.
    +func DeepEqual(a, b interface{}) bool {
    +	ra := reflect.Indirect(reflect.ValueOf(a))
    +	rb := reflect.Indirect(reflect.ValueOf(b))
    +
    +	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
    +		// If the elements are both nil, and of the same type the are equal
    +		// If they are of different types they are not equal
    +		return reflect.TypeOf(a) == reflect.TypeOf(b)
    +	} else if raValid != rbValid {
    +		// Both values must be valid to be equal
    +		return false
    +	}
    +
    +	return reflect.DeepEqual(ra.Interface(), rb.Interface())
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
    new file mode 100644
    index 0000000..11c52c3
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
    @@ -0,0 +1,222 @@
    +package awsutil
    +
    +import (
    +	"reflect"
    +	"regexp"
    +	"strconv"
    +	"strings"
    +
    +	"github.com/jmespath/go-jmespath"
    +)
    +
    +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
    +
    +// rValuesAtPath returns a slice of values found in value v. The values
    +// in v are explored recursively so all nested values are collected.
    +func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
    +	pathparts := strings.Split(path, "||")
    +	if len(pathparts) > 1 {
    +		for _, pathpart := range pathparts {
    +			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
    +			if len(vals) > 0 {
    +				return vals
    +			}
    +		}
    +		return nil
    +	}
    +
    +	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
    +	components := strings.Split(path, ".")
    +	for len(values) > 0 && len(components) > 0 {
    +		var index *int64
    +		var indexStar bool
    +		c := strings.TrimSpace(components[0])
    +		if c == "" { // no actual component, illegal syntax
    +			return nil
    +		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
    +			// TODO normalize case for user
    +			return nil // don't support unexported fields
    +		}
    +
    +		// parse this component
    +		if m := indexRe.FindStringSubmatch(c); m != nil {
    +			c = m[1]
    +			if m[2] == "" {
    +				index = nil
    +				indexStar = true
    +			} else {
    +				i, _ := strconv.ParseInt(m[2], 10, 32)
    +				index = &i
    +				indexStar = false
    +			}
    +		}
    +
    +		nextvals := []reflect.Value{}
    +		for _, value := range values {
    +			// pull component name out of struct member
    +			if value.Kind() != reflect.Struct {
    +				continue
    +			}
    +
    +			if c == "*" { // pull all members
    +				for i := 0; i < value.NumField(); i++ {
    +					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
    +						nextvals = append(nextvals, f)
    +					}
    +				}
    +				continue
    +			}
    +
    +			value = value.FieldByNameFunc(func(name string) bool {
    +				if c == name {
    +					return true
    +				} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
    +					return true
    +				}
    +				return false
    +			})
    +
    +			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
    +				if !value.IsNil() {
    +					value.Set(reflect.Zero(value.Type()))
    +				}
    +				return []reflect.Value{value}
    +			}
    +
    +			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
    +				// TODO if the value is the terminus it should not be created
    +				// if the value to be set to its position is nil.
    +				value.Set(reflect.New(value.Type().Elem()))
    +				value = value.Elem()
    +			} else {
    +				value = reflect.Indirect(value)
    +			}
    +
    +			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
    +				if !createPath && value.IsNil() {
    +					value = reflect.ValueOf(nil)
    +				}
    +			}
    +
    +			if value.IsValid() {
    +				nextvals = append(nextvals, value)
    +			}
    +		}
    +		values = nextvals
    +
    +		if indexStar || index != nil {
    +			nextvals = []reflect.Value{}
    +			for _, valItem := range values {
    +				value := reflect.Indirect(valItem)
    +				if value.Kind() != reflect.Slice {
    +					continue
    +				}
    +
    +				if indexStar { // grab all indices
    +					for i := 0; i < value.Len(); i++ {
    +						idx := reflect.Indirect(value.Index(i))
    +						if idx.IsValid() {
    +							nextvals = append(nextvals, idx)
    +						}
    +					}
    +					continue
    +				}
    +
    +				// pull out index
    +				i := int(*index)
    +				if i >= value.Len() { // check out of bounds
    +					if createPath {
    +						// TODO resize slice
    +					} else {
    +						continue
    +					}
    +				} else if i < 0 { // support negative indexing
    +					i = value.Len() + i
    +				}
    +				value = reflect.Indirect(value.Index(i))
    +
    +				if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
    +					if !createPath && value.IsNil() {
    +						value = reflect.ValueOf(nil)
    +					}
    +				}
    +
    +				if value.IsValid() {
    +					nextvals = append(nextvals, value)
    +				}
    +			}
    +			values = nextvals
    +		}
    +
    +		components = components[1:]
    +	}
    +	return values
    +}
    +
    +// ValuesAtPath returns a list of values at the case insensitive lexical
    +// path inside of a structure.
    +func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
    +	result, err := jmespath.Search(path, i)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	v := reflect.ValueOf(result)
    +	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
    +		return nil, nil
    +	}
    +	if s, ok := result.([]interface{}); ok {
    +		return s, err
    +	}
    +	if v.Kind() == reflect.Map && v.Len() == 0 {
    +		return nil, nil
    +	}
    +	if v.Kind() == reflect.Slice {
    +		out := make([]interface{}, v.Len())
    +		for i := 0; i < v.Len(); i++ {
    +			out[i] = v.Index(i).Interface()
    +		}
    +		return out, nil
    +	}
    +
    +	return []interface{}{result}, nil
    +}
    +
    +// SetValueAtPath sets a value at the case insensitive lexical path inside
    +// of a structure.
    +func SetValueAtPath(i interface{}, path string, v interface{}) {
    +	if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
    +		for _, rval := range rvals {
    +			if rval.Kind() == reflect.Ptr && rval.IsNil() {
    +				continue
    +			}
    +			setValue(rval, v)
    +		}
    +	}
    +}
    +
    +func setValue(dstVal reflect.Value, src interface{}) {
    +	if dstVal.Kind() == reflect.Ptr {
    +		dstVal = reflect.Indirect(dstVal)
    +	}
    +	srcVal := reflect.ValueOf(src)
    +
    +	if !srcVal.IsValid() { // src is literal nil
    +		if dstVal.CanAddr() {
    +			// Convert to pointer so that pointer's value can be nil'ed
    +			//                     dstVal = dstVal.Addr()
    +		}
    +		dstVal.Set(reflect.Zero(dstVal.Type()))
    +
    +	} else if srcVal.Kind() == reflect.Ptr {
    +		if srcVal.IsNil() {
    +			srcVal = reflect.Zero(dstVal.Type())
    +		} else {
    +			srcVal = reflect.ValueOf(src).Elem()
    +		}
    +		dstVal.Set(srcVal)
    +	} else {
    +		dstVal.Set(srcVal)
    +	}
    +
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
    new file mode 100644
    index 0000000..fc38172
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
    @@ -0,0 +1,107 @@
    +package awsutil
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"reflect"
    +	"strings"
    +)
    +
    +// Prettify returns the string representation of a value.
    +func Prettify(i interface{}) string {
    +	var buf bytes.Buffer
    +	prettify(reflect.ValueOf(i), 0, &buf)
    +	return buf.String()
    +}
    +
    +// prettify will recursively walk value v to build a textual
    +// representation of the value.
    +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
    +	for v.Kind() == reflect.Ptr {
    +		v = v.Elem()
    +	}
    +
    +	switch v.Kind() {
    +	case reflect.Struct:
    +		strtype := v.Type().String()
    +		if strtype == "time.Time" {
    +			fmt.Fprintf(buf, "%s", v.Interface())
    +			break
    +		} else if strings.HasPrefix(strtype, "io.") {
    +			buf.WriteString("")
    +			break
    +		}
    +
    +		buf.WriteString("{\n")
    +
    +		names := []string{}
    +		for i := 0; i < v.Type().NumField(); i++ {
    +			name := v.Type().Field(i).Name
    +			f := v.Field(i)
    +			if name[0:1] == strings.ToLower(name[0:1]) {
    +				continue // ignore unexported fields
    +			}
    +			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
    +				continue // ignore unset fields
    +			}
    +			names = append(names, name)
    +		}
    +
    +		for i, n := range names {
    +			val := v.FieldByName(n)
    +			buf.WriteString(strings.Repeat(" ", indent+2))
    +			buf.WriteString(n + ": ")
    +			prettify(val, indent+2, buf)
    +
    +			if i < len(names)-1 {
    +				buf.WriteString(",\n")
    +			}
    +		}
    +
    +		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
    +	case reflect.Slice:
    +		nl, id, id2 := "", "", ""
    +		if v.Len() > 3 {
    +			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
    +		}
    +		buf.WriteString("[" + nl)
    +		for i := 0; i < v.Len(); i++ {
    +			buf.WriteString(id2)
    +			prettify(v.Index(i), indent+2, buf)
    +
    +			if i < v.Len()-1 {
    +				buf.WriteString("," + nl)
    +			}
    +		}
    +
    +		buf.WriteString(nl + id + "]")
    +	case reflect.Map:
    +		buf.WriteString("{\n")
    +
    +		for i, k := range v.MapKeys() {
    +			buf.WriteString(strings.Repeat(" ", indent+2))
    +			buf.WriteString(k.String() + ": ")
    +			prettify(v.MapIndex(k), indent+2, buf)
    +
    +			if i < v.Len()-1 {
    +				buf.WriteString(",\n")
    +			}
    +		}
    +
    +		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
    +	default:
    +		if !v.IsValid() {
    +			fmt.Fprint(buf, "")
    +			return
    +		}
    +		format := "%v"
    +		switch v.Interface().(type) {
    +		case string:
    +			format = "%q"
    +		case io.ReadSeeker, io.Reader:
    +			format = "buffer(%p)"
    +		}
    +		fmt.Fprintf(buf, format, v.Interface())
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
    new file mode 100644
    index 0000000..b6432f1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
    @@ -0,0 +1,89 @@
    +package awsutil
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"reflect"
    +	"strings"
    +)
    +
    +// StringValue returns the string representation of a value.
    +func StringValue(i interface{}) string {
    +	var buf bytes.Buffer
    +	stringValue(reflect.ValueOf(i), 0, &buf)
    +	return buf.String()
    +}
    +
    +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
    +	for v.Kind() == reflect.Ptr {
    +		v = v.Elem()
    +	}
    +
    +	switch v.Kind() {
    +	case reflect.Struct:
    +		buf.WriteString("{\n")
    +
    +		names := []string{}
    +		for i := 0; i < v.Type().NumField(); i++ {
    +			name := v.Type().Field(i).Name
    +			f := v.Field(i)
    +			if name[0:1] == strings.ToLower(name[0:1]) {
    +				continue // ignore unexported fields
    +			}
    +			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
    +				continue // ignore unset fields
    +			}
    +			names = append(names, name)
    +		}
    +
    +		for i, n := range names {
    +			val := v.FieldByName(n)
    +			buf.WriteString(strings.Repeat(" ", indent+2))
    +			buf.WriteString(n + ": ")
    +			stringValue(val, indent+2, buf)
    +
    +			if i < len(names)-1 {
    +				buf.WriteString(",\n")
    +			}
    +		}
    +
    +		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
    +	case reflect.Slice:
    +		nl, id, id2 := "", "", ""
    +		if v.Len() > 3 {
    +			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
    +		}
    +		buf.WriteString("[" + nl)
    +		for i := 0; i < v.Len(); i++ {
    +			buf.WriteString(id2)
    +			stringValue(v.Index(i), indent+2, buf)
    +
    +			if i < v.Len()-1 {
    +				buf.WriteString("," + nl)
    +			}
    +		}
    +
    +		buf.WriteString(nl + id + "]")
    +	case reflect.Map:
    +		buf.WriteString("{\n")
    +
    +		for i, k := range v.MapKeys() {
    +			buf.WriteString(strings.Repeat(" ", indent+2))
    +			buf.WriteString(k.String() + ": ")
    +			stringValue(v.MapIndex(k), indent+2, buf)
    +
    +			if i < v.Len()-1 {
    +				buf.WriteString(",\n")
    +			}
    +		}
    +
    +		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
    +	default:
    +		format := "%v"
    +		switch v.Interface().(type) {
    +		case string:
    +			format = "%q"
    +		}
    +		fmt.Fprintf(buf, format, v.Interface())
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
    new file mode 100644
    index 0000000..7c0e7d9
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
    @@ -0,0 +1,137 @@
    +package client
    +
    +import (
    +	"fmt"
    +	"net/http/httputil"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/client/metadata"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// A Config provides configuration to a service client instance.
    +type Config struct {
    +	Config                  *aws.Config
    +	Handlers                request.Handlers
    +	Endpoint, SigningRegion string
    +}
    +
    +// ConfigProvider provides a generic way for a service client to receive
    +// the ClientConfig without circular dependencies.
    +type ConfigProvider interface {
    +	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
    +}
    +
    +// A Client implements the base client request and response handling
    +// used by all service clients.
    +type Client struct {
    +	request.Retryer
    +	metadata.ClientInfo
    +
    +	Config   aws.Config
    +	Handlers request.Handlers
    +}
    +
    +// New will return a pointer to a new initialized service client.
    +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
    +	svc := &Client{
    +		Config:     cfg,
    +		ClientInfo: info,
    +		Handlers:   handlers,
    +	}
    +
    +	switch retryer, ok := cfg.Retryer.(request.Retryer); {
    +	case ok:
    +		svc.Retryer = retryer
    +	case cfg.Retryer != nil && cfg.Logger != nil:
    +		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
    +		cfg.Logger.Log(s)
    +		fallthrough
    +	default:
    +		maxRetries := aws.IntValue(cfg.MaxRetries)
    +		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
    +			maxRetries = 3
    +		}
    +		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
    +	}
    +
    +	svc.AddDebugHandlers()
    +
    +	for _, option := range options {
    +		option(svc)
    +	}
    +
    +	return svc
    +}
    +
    +// NewRequest returns a new Request pointer for the service API
    +// operation and parameters.
    +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
    +	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
    +}
    +
    +// AddDebugHandlers injects debug logging handlers into the service to log request
    +// debug information.
    +func (c *Client) AddDebugHandlers() {
    +	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
    +		return
    +	}
    +
    +	c.Handlers.Send.PushFront(logRequest)
    +	c.Handlers.Send.PushBack(logResponse)
    +}
    +
    +const logReqMsg = `DEBUG: Request %s/%s Details:
    +---[ REQUEST POST-SIGN ]-----------------------------
    +%s
    +-----------------------------------------------------`
    +
    +const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
    +---[ REQUEST DUMP ERROR ]-----------------------------
    +%s
    +-----------------------------------------------------`
    +
    +func logRequest(r *request.Request) {
    +	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
    +	dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
    +	if err != nil {
    +		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
    +		return
    +	}
    +
    +	if logBody {
    +		// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
    +		// Body as a NoOpCloser and will not be reset after read by the HTTP
    +		// client reader.
    +		r.ResetBody()
    +	}
    +
    +	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
    +}
    +
    +const logRespMsg = `DEBUG: Response %s/%s Details:
    +---[ RESPONSE ]--------------------------------------
    +%s
    +-----------------------------------------------------`
    +
    +const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
    +---[ RESPONSE DUMP ERROR ]-----------------------------
    +%s
    +-----------------------------------------------------`
    +
    +func logResponse(r *request.Request) {
    +	var msg = "no response data"
    +	if r.HTTPResponse != nil {
    +		logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
    +		dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
    +		if err != nil {
    +			r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
    +			return
    +		}
    +
    +		msg = string(dumpedBody)
    +	} else if r.Error != nil {
    +		msg = r.Error.Error()
    +	}
    +	r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
    new file mode 100644
    index 0000000..43a3676
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
    @@ -0,0 +1,90 @@
    +package client
    +
    +import (
    +	"math/rand"
    +	"sync"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// DefaultRetryer implements basic retry logic using exponential backoff for
    +// most services. If you want to implement custom retry logic, implement the
    +// request.Retryer interface or create a structure type that composes this
    +// struct and override the specific methods. For example, to override only
    +// the MaxRetries method:
    +//
    +//		type retryer struct {
    +//      service.DefaultRetryer
    +//    }
    +//
    +//    // This implementation always has 100 max retries
    +//    func (d retryer) MaxRetries() uint { return 100 }
    +type DefaultRetryer struct {
    +	NumMaxRetries int
    +}
    +
    +// MaxRetries returns the maximum number of retries the service will use to make
    +// an individual API request.
    +func (d DefaultRetryer) MaxRetries() int {
    +	return d.NumMaxRetries
    +}
    +
    +var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
    +
    +// RetryRules returns the delay duration before retrying this request again
    +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
    +	// Set the upper limit of delay in retrying at ~five minutes
    +	minTime := 30
    +	throttle := d.shouldThrottle(r)
    +	if throttle {
    +		minTime = 500
    +	}
    +
    +	retryCount := r.RetryCount
    +	if retryCount > 13 {
    +		retryCount = 13
    +	} else if throttle && retryCount > 8 {
    +		retryCount = 8
    +	}
    +
    +	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
    +	return time.Duration(delay) * time.Millisecond
    +}
    +
    +// ShouldRetry returns true if the request should be retried.
    +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
    +	if r.HTTPResponse.StatusCode >= 500 {
    +		return true
    +	}
    +	return r.IsErrorRetryable() || d.shouldThrottle(r)
    +}
    +
    +// ShouldThrottle returns true if the request should be throttled.
    +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
    +	if r.HTTPResponse.StatusCode == 502 ||
    +		r.HTTPResponse.StatusCode == 503 ||
    +		r.HTTPResponse.StatusCode == 504 {
    +		return true
    +	}
    +	return r.IsErrorThrottle()
    +}
    +
    +// lockedSource is a thread-safe implementation of rand.Source
    +type lockedSource struct {
    +	lk  sync.Mutex
    +	src rand.Source
    +}
    +
    +func (r *lockedSource) Int63() (n int64) {
    +	r.lk.Lock()
    +	n = r.src.Int63()
    +	r.lk.Unlock()
    +	return
    +}
    +
    +func (r *lockedSource) Seed(seed int64) {
    +	r.lk.Lock()
    +	r.src.Seed(seed)
    +	r.lk.Unlock()
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
    new file mode 100644
    index 0000000..4778056
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
    @@ -0,0 +1,12 @@
    +package metadata
    +
    +// ClientInfo wraps immutable data from the client.Client structure.
    +type ClientInfo struct {
    +	ServiceName   string
    +	APIVersion    string
    +	Endpoint      string
    +	SigningName   string
    +	SigningRegion string
    +	JSONVersion   string
    +	TargetPrefix  string
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/config.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/config.go
    new file mode 100644
    index 0000000..34c2bab
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/config.go
    @@ -0,0 +1,419 @@
    +package aws
    +
    +import (
    +	"net/http"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +)
    +
    +// UseServiceDefaultRetries instructs the config to use the service's own
    +// default number of retries. This will be the default action if
    +// Config.MaxRetries is nil also.
    +const UseServiceDefaultRetries = -1
    +
    +// RequestRetryer is an alias for a type that implements the request.Retryer
    +// interface.
    +type RequestRetryer interface{}
    +
    +// A Config provides service configuration for service clients. By default,
    +// all clients will use the defaults.DefaultConfig structure.
    +//
    +//     // Create Session with MaxRetry configuration to be shared by multiple
    +//     // service clients.
    +//     sess, err := session.NewSession(&aws.Config{
    +//         MaxRetries: aws.Int(3),
    +//     })
    +//
    +//     // Create S3 service client with a specific Region.
    +//     svc := s3.New(sess, &aws.Config{
    +//         Region: aws.String("us-west-2"),
    +//     })
    +type Config struct {
    +	// Enables verbose error printing of all credential chain errors.
    +	// Should be used when wanting to see all errors while attempting to
    +	// retrieve credentials.
    +	CredentialsChainVerboseErrors *bool
    +
    +	// The credentials object to use when signing requests. Defaults to a
    +	// chain of credential providers to search for credentials in environment
    +	// variables, shared credential file, and EC2 Instance Roles.
    +	Credentials *credentials.Credentials
    +
    +	// An optional endpoint URL (hostname only or fully qualified URI)
    +	// that overrides the default generated endpoint for a client. Set this
    +	// to `""` to use the default generated endpoint.
    +	//
    +	// @note You must still provide a `Region` value when specifying an
    +	//   endpoint for a client.
    +	Endpoint *string
    +
    +	// The region to send requests to. This parameter is required and must
    +	// be configured globally or on a per-client basis unless otherwise
    +	// noted. A full list of regions is found in the "Regions and Endpoints"
    +	// document.
    +	//
    +	// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
    +	//   AWS Regions and Endpoints
    +	Region *string
    +
    +	// Set this to `true` to disable SSL when sending requests. Defaults
    +	// to `false`.
    +	DisableSSL *bool
    +
    +	// The HTTP client to use when sending requests. Defaults to
    +	// `http.DefaultClient`.
    +	HTTPClient *http.Client
    +
    +	// An integer value representing the logging level. The default log level
    +	// is zero (LogOff), which represents no logging. To enable logging set
    +	// to a LogLevel Value.
    +	LogLevel *LogLevelType
    +
    +	// The logger writer interface to write logging messages to. Defaults to
    +	// standard out.
    +	Logger Logger
    +
    +	// The maximum number of times that a request will be retried for failures.
    +	// Defaults to -1, which defers the max retry setting to the service
    +	// specific configuration.
    +	MaxRetries *int
    +
    +	// Retryer guides how HTTP requests should be retried in case of
    +	// recoverable failures.
    +	//
    +	// When nil or the value does not implement the request.Retryer interface,
    +	// the request.DefaultRetryer will be used.
    +	//
    +	// When both Retryer and MaxRetries are non-nil, the former is used and
    +	// the latter ignored.
    +	//
    +	// To set the Retryer field in a type-safe manner and with chaining, use
    +	// the request.WithRetryer helper function:
    +	//
    +	//   cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
    +	//
    +	Retryer RequestRetryer
    +
    +	// Disables semantic parameter validation, which validates input for
    +	// missing required fields and/or other semantic request input errors.
    +	DisableParamValidation *bool
    +
    +	// Disables the computation of request and response checksums, e.g.,
    +	// CRC32 checksums in Amazon DynamoDB.
    +	DisableComputeChecksums *bool
    +
    +	// Set this to `true` to force the request to use path-style addressing,
    +	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
    +	// will use virtual hosted bucket addressing when possible
    +	// (`http://BUCKET.s3.amazonaws.com/KEY`).
    +	//
    +	// @note This configuration option is specific to the Amazon S3 service.
    +	// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
    +	//   Amazon S3: Virtual Hosting of Buckets
    +	S3ForcePathStyle *bool
    +
    +	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
    +	// header to PUT requests over 2MB of content. 100-Continue instructs the
    +	// HTTP client not to send the body until the service responds with a
    +	// `continue` status. This is useful to prevent sending the request body
    +	// until after the request is authenticated, and validated.
    +	//
    +	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    +	//
    +	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
    +	// `ExpectContinueTimeout` for information on adjusting the continue wait
    +	// timeout. https://golang.org/pkg/net/http/#Transport
    +	//
    +	// You should use this flag to disable 100-Continue if you experience issues
    +	// with proxies or third party S3 compatible services.
    +	S3Disable100Continue *bool
    +
    +	// Set this to `true` to enable S3 Accelerate feature. For all operations
    +	// compatible with S3 Accelerate will use the accelerate endpoint for
    +	// requests. Requests not compatible will fall back to normal S3 requests.
    +	//
    +	// The bucket must be enable for accelerate to be used with S3 client with
    +	// accelerate enabled. If the bucket is not enabled for accelerate an error
    +	// will be returned. The bucket name must be DNS compatible to also work
    +	// with accelerate.
    +	S3UseAccelerate *bool
    +
    +	// Set this to `true` to disable the EC2Metadata client from overriding the
    +	// default http.Client's Timeout. This is helpful if you do not want the
    +	// EC2Metadata client to create a new http.Client. This options is only
    +	// meaningful if you're not already using a custom HTTP client with the
    +	// SDK. Enabled by default.
    +	//
    +	// Must be set and provided to the session.NewSession() in order to disable
    +	// the EC2Metadata overriding the timeout for default credentials chain.
    +	//
    +	// Example:
    +	//    sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
    +	//
    +	//    svc := s3.New(sess)
    +	//
    +	EC2MetadataDisableTimeoutOverride *bool
    +
    +	// Instructs the endpoint to be generated for a service client to
    +	// be the dual stack endpoint. The dual stack endpoint will support
    +	// both IPv4 and IPv6 addressing.
    +	//
    +	// Setting this for a service which does not support dual stack will fail
    +	// to make requests. It is not recommended to set this value on the session
    +	// as it will apply to all service clients created with the session. Even
    +	// services which don't support dual stack endpoints.
    +	//
    +	// If the Endpoint config value is also provided the UseDualStack flag
    +	// will be ignored.
    +	//
    +	// Only supported with.
    +	//
    +	//     sess, err := session.NewSession()
    +	//
    +	//     svc := s3.New(sess, &aws.Config{
    +	//         UseDualStack: aws.Bool(true),
    +	//     })
    +	UseDualStack *bool
    +
    +	// SleepDelay is an override for the func the SDK will call when sleeping
    +	// during the lifecycle of a request. Specifically this will be used for
    +	// request delays. This value should only be used for testing. To adjust
    +	// the delay of a request see the aws/client.DefaultRetryer and
    +	// aws/request.Retryer.
    +	SleepDelay func(time.Duration)
    +}
    +
    +// NewConfig returns a new Config pointer that can be chained with builder
    +// methods to set multiple configuration values inline without using pointers.
    +//
    +//     // Create Session with MaxRetry configuration to be shared by multiple
    +//     // service clients.
    +//     sess, err := session.NewSession(aws.NewConfig().
    +//         WithMaxRetries(3),
    +//     )
    +//
    +//     // Create S3 service client with a specific Region.
    +//     svc := s3.New(sess, aws.NewConfig().
    +//         WithRegion("us-west-2"),
    +//     )
    +func NewConfig() *Config {
    +	return &Config{}
    +}
    +
    +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
    +// a Config pointer.
    +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
    +	c.CredentialsChainVerboseErrors = &verboseErrs
    +	return c
    +}
    +
    +// WithCredentials sets a config Credentials value returning a Config pointer
    +// for chaining.
    +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
    +	c.Credentials = creds
    +	return c
    +}
    +
    +// WithEndpoint sets a config Endpoint value returning a Config pointer for
    +// chaining.
    +func (c *Config) WithEndpoint(endpoint string) *Config {
    +	c.Endpoint = &endpoint
    +	return c
    +}
    +
    +// WithRegion sets a config Region value returning a Config pointer for
    +// chaining.
    +func (c *Config) WithRegion(region string) *Config {
    +	c.Region = &region
    +	return c
    +}
    +
    +// WithDisableSSL sets a config DisableSSL value returning a Config pointer
    +// for chaining.
    +func (c *Config) WithDisableSSL(disable bool) *Config {
    +	c.DisableSSL = &disable
    +	return c
    +}
    +
    +// WithHTTPClient sets a config HTTPClient value returning a Config pointer
    +// for chaining.
    +func (c *Config) WithHTTPClient(client *http.Client) *Config {
    +	c.HTTPClient = client
    +	return c
    +}
    +
    +// WithMaxRetries sets a config MaxRetries value returning a Config pointer
    +// for chaining.
    +func (c *Config) WithMaxRetries(max int) *Config {
    +	c.MaxRetries = &max
    +	return c
    +}
    +
    +// WithDisableParamValidation sets a config DisableParamValidation value
    +// returning a Config pointer for chaining.
    +func (c *Config) WithDisableParamValidation(disable bool) *Config {
    +	c.DisableParamValidation = &disable
    +	return c
    +}
    +
    +// WithDisableComputeChecksums sets a config DisableComputeChecksums value
    +// returning a Config pointer for chaining.
    +func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
    +	c.DisableComputeChecksums = &disable
    +	return c
    +}
    +
    +// WithLogLevel sets a config LogLevel value returning a Config pointer for
    +// chaining.
    +func (c *Config) WithLogLevel(level LogLevelType) *Config {
    +	c.LogLevel = &level
    +	return c
    +}
    +
    +// WithLogger sets a config Logger value returning a Config pointer for
    +// chaining.
    +func (c *Config) WithLogger(logger Logger) *Config {
    +	c.Logger = logger
    +	return c
    +}
    +
    +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
    +// pointer for chaining.
    +func (c *Config) WithS3ForcePathStyle(force bool) *Config {
    +	c.S3ForcePathStyle = &force
    +	return c
    +}
    +
    +// WithS3Disable100Continue sets a config S3Disable100Continue value returning
    +// a Config pointer for chaining.
    +func (c *Config) WithS3Disable100Continue(disable bool) *Config {
    +	c.S3Disable100Continue = &disable
    +	return c
    +}
    +
    +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
    +// pointer for chaining.
    +func (c *Config) WithS3UseAccelerate(enable bool) *Config {
    +	c.S3UseAccelerate = &enable
    +	return c
    +}
    +
    +// WithUseDualStack sets a config UseDualStack value returning a Config
    +// pointer for chaining.
    +func (c *Config) WithUseDualStack(enable bool) *Config {
    +	c.UseDualStack = &enable
    +	return c
    +}
    +
    +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
    +// returning a Config pointer for chaining.
    +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
    +	c.EC2MetadataDisableTimeoutOverride = &enable
    +	return c
    +}
    +
    +// WithSleepDelay overrides the function used to sleep while waiting for the
    +// next retry. Defaults to time.Sleep.
    +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
    +	c.SleepDelay = fn
    +	return c
    +}
    +
    +// MergeIn merges the passed in configs into the existing config object.
    +func (c *Config) MergeIn(cfgs ...*Config) {
    +	for _, other := range cfgs {
    +		mergeInConfig(c, other)
    +	}
    +}
    +
    +func mergeInConfig(dst *Config, other *Config) {
    +	if other == nil {
    +		return
    +	}
    +
    +	if other.CredentialsChainVerboseErrors != nil {
    +		dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
    +	}
    +
    +	if other.Credentials != nil {
    +		dst.Credentials = other.Credentials
    +	}
    +
    +	if other.Endpoint != nil {
    +		dst.Endpoint = other.Endpoint
    +	}
    +
    +	if other.Region != nil {
    +		dst.Region = other.Region
    +	}
    +
    +	if other.DisableSSL != nil {
    +		dst.DisableSSL = other.DisableSSL
    +	}
    +
    +	if other.HTTPClient != nil {
    +		dst.HTTPClient = other.HTTPClient
    +	}
    +
    +	if other.LogLevel != nil {
    +		dst.LogLevel = other.LogLevel
    +	}
    +
    +	if other.Logger != nil {
    +		dst.Logger = other.Logger
    +	}
    +
    +	if other.MaxRetries != nil {
    +		dst.MaxRetries = other.MaxRetries
    +	}
    +
    +	if other.Retryer != nil {
    +		dst.Retryer = other.Retryer
    +	}
    +
    +	if other.DisableParamValidation != nil {
    +		dst.DisableParamValidation = other.DisableParamValidation
    +	}
    +
    +	if other.DisableComputeChecksums != nil {
    +		dst.DisableComputeChecksums = other.DisableComputeChecksums
    +	}
    +
    +	if other.S3ForcePathStyle != nil {
    +		dst.S3ForcePathStyle = other.S3ForcePathStyle
    +	}
    +
    +	if other.S3Disable100Continue != nil {
    +		dst.S3Disable100Continue = other.S3Disable100Continue
    +	}
    +
    +	if other.S3UseAccelerate != nil {
    +		dst.S3UseAccelerate = other.S3UseAccelerate
    +	}
    +
    +	if other.UseDualStack != nil {
    +		dst.UseDualStack = other.UseDualStack
    +	}
    +
    +	if other.EC2MetadataDisableTimeoutOverride != nil {
    +		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
    +	}
    +
    +	if other.SleepDelay != nil {
    +		dst.SleepDelay = other.SleepDelay
    +	}
    +}
    +
    +// Copy will return a shallow copy of the Config object. If any additional
    +// configurations are provided they will be merged into the new config returned.
    +func (c *Config) Copy(cfgs ...*Config) *Config {
    +	dst := &Config{}
    +	dst.MergeIn(c)
    +
    +	for _, cfg := range cfgs {
    +		dst.MergeIn(cfg)
    +	}
    +
    +	return dst
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
    new file mode 100644
    index 0000000..3b73a7d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
    @@ -0,0 +1,369 @@
    +package aws
    +
    +import "time"
    +
    +// String returns a pointer to the string value passed in.
    +func String(v string) *string {
    +	return &v
    +}
    +
    +// StringValue returns the value of the string pointer passed in or
    +// "" if the pointer is nil.
    +func StringValue(v *string) string {
    +	if v != nil {
    +		return *v
    +	}
    +	return ""
    +}
    +
    +// StringSlice converts a slice of string values into a slice of
    +// string pointers
    +func StringSlice(src []string) []*string {
    +	dst := make([]*string, len(src))
    +	for i := 0; i < len(src); i++ {
    +		dst[i] = &(src[i])
    +	}
    +	return dst
    +}
    +
    +// StringValueSlice converts a slice of string pointers into a slice of
    +// string values
    +func StringValueSlice(src []*string) []string {
    +	dst := make([]string, len(src))
    +	for i := 0; i < len(src); i++ {
    +		if src[i] != nil {
    +			dst[i] = *(src[i])
    +		}
    +	}
    +	return dst
    +}
    +
    +// StringMap converts a string map of string values into a string
    +// map of string pointers
    +func StringMap(src map[string]string) map[string]*string {
    +	dst := make(map[string]*string)
    +	for k, val := range src {
    +		v := val
    +		dst[k] = &v
    +	}
    +	return dst
    +}
    +
    +// StringValueMap converts a string map of string pointers into a string
    +// map of string values
    +func StringValueMap(src map[string]*string) map[string]string {
    +	dst := make(map[string]string)
    +	for k, val := range src {
    +		if val != nil {
    +			dst[k] = *val
    +		}
    +	}
    +	return dst
    +}
    +
    +// Bool returns a pointer to the bool value passed in.
    +func Bool(v bool) *bool {
    +	return &v
    +}
    +
    +// BoolValue returns the value of the bool pointer passed in or
    +// false if the pointer is nil.
    +func BoolValue(v *bool) bool {
    +	if v != nil {
    +		return *v
    +	}
    +	return false
    +}
    +
    +// BoolSlice converts a slice of bool values into a slice of
    +// bool pointers
    +func BoolSlice(src []bool) []*bool {
    +	dst := make([]*bool, len(src))
    +	for i := 0; i < len(src); i++ {
    +		dst[i] = &(src[i])
    +	}
    +	return dst
    +}
    +
    +// BoolValueSlice converts a slice of bool pointers into a slice of
    +// bool values
    +func BoolValueSlice(src []*bool) []bool {
    +	dst := make([]bool, len(src))
    +	for i := 0; i < len(src); i++ {
    +		if src[i] != nil {
    +			dst[i] = *(src[i])
    +		}
    +	}
    +	return dst
    +}
    +
    +// BoolMap converts a string map of bool values into a string
    +// map of bool pointers
    +func BoolMap(src map[string]bool) map[string]*bool {
    +	dst := make(map[string]*bool)
    +	for k, val := range src {
    +		v := val
    +		dst[k] = &v
    +	}
    +	return dst
    +}
    +
    +// BoolValueMap converts a string map of bool pointers into a string
    +// map of bool values
    +func BoolValueMap(src map[string]*bool) map[string]bool {
    +	dst := make(map[string]bool)
    +	for k, val := range src {
    +		if val != nil {
    +			dst[k] = *val
    +		}
    +	}
    +	return dst
    +}
    +
    +// Int returns a pointer to the int value passed in.
    +func Int(v int) *int {
    +	return &v
    +}
    +
    +// IntValue returns the value of the int pointer passed in or
    +// 0 if the pointer is nil.
    +func IntValue(v *int) int {
    +	if v != nil {
    +		return *v
    +	}
    +	return 0
    +}
    +
    +// IntSlice converts a slice of int values into a slice of
    +// int pointers
    +func IntSlice(src []int) []*int {
    +	dst := make([]*int, len(src))
    +	for i := 0; i < len(src); i++ {
    +		dst[i] = &(src[i])
    +	}
    +	return dst
    +}
    +
    +// IntValueSlice converts a slice of int pointers into a slice of
    +// int values
    +func IntValueSlice(src []*int) []int {
    +	dst := make([]int, len(src))
    +	for i := 0; i < len(src); i++ {
    +		if src[i] != nil {
    +			dst[i] = *(src[i])
    +		}
    +	}
    +	return dst
    +}
    +
    +// IntMap converts a string map of int values into a string
    +// map of int pointers
    +func IntMap(src map[string]int) map[string]*int {
    +	dst := make(map[string]*int)
    +	for k, val := range src {
    +		v := val
    +		dst[k] = &v
    +	}
    +	return dst
    +}
    +
    +// IntValueMap converts a string map of int pointers into a string
    +// map of int values
    +func IntValueMap(src map[string]*int) map[string]int {
    +	dst := make(map[string]int)
    +	for k, val := range src {
    +		if val != nil {
    +			dst[k] = *val
    +		}
    +	}
    +	return dst
    +}
    +
    +// Int64 returns a pointer to the int64 value passed in.
    +func Int64(v int64) *int64 {
    +	return &v
    +}
    +
    +// Int64Value returns the value of the int64 pointer passed in or
    +// 0 if the pointer is nil.
    +func Int64Value(v *int64) int64 {
    +	if v != nil {
    +		return *v
    +	}
    +	return 0
    +}
    +
    +// Int64Slice converts a slice of int64 values into a slice of
    +// int64 pointers
    +func Int64Slice(src []int64) []*int64 {
    +	dst := make([]*int64, len(src))
    +	for i := 0; i < len(src); i++ {
    +		dst[i] = &(src[i])
    +	}
    +	return dst
    +}
    +
    +// Int64ValueSlice converts a slice of int64 pointers into a slice of
    +// int64 values
    +func Int64ValueSlice(src []*int64) []int64 {
    +	dst := make([]int64, len(src))
    +	for i := 0; i < len(src); i++ {
    +		if src[i] != nil {
    +			dst[i] = *(src[i])
    +		}
    +	}
    +	return dst
    +}
    +
    +// Int64Map converts a string map of int64 values into a string
    +// map of int64 pointers
    +func Int64Map(src map[string]int64) map[string]*int64 {
    +	dst := make(map[string]*int64)
    +	for k, val := range src {
    +		v := val
    +		dst[k] = &v
    +	}
    +	return dst
    +}
    +
    +// Int64ValueMap converts a string map of int64 pointers into a string
    +// map of int64 values
    +func Int64ValueMap(src map[string]*int64) map[string]int64 {
    +	dst := make(map[string]int64)
    +	for k, val := range src {
    +		if val != nil {
    +			dst[k] = *val
    +		}
    +	}
    +	return dst
    +}
    +
    +// Float64 returns a pointer to the float64 value passed in.
    +func Float64(v float64) *float64 {
    +	return &v
    +}
    +
    +// Float64Value returns the value of the float64 pointer passed in or
    +// 0 if the pointer is nil.
    +func Float64Value(v *float64) float64 {
    +	if v != nil {
    +		return *v
    +	}
    +	return 0
    +}
    +
    +// Float64Slice converts a slice of float64 values into a slice of
    +// float64 pointers
    +func Float64Slice(src []float64) []*float64 {
    +	dst := make([]*float64, len(src))
    +	for i := 0; i < len(src); i++ {
    +		dst[i] = &(src[i])
    +	}
    +	return dst
    +}
    +
    +// Float64ValueSlice converts a slice of float64 pointers into a slice of
    +// float64 values
    +func Float64ValueSlice(src []*float64) []float64 {
    +	dst := make([]float64, len(src))
    +	for i := 0; i < len(src); i++ {
    +		if src[i] != nil {
    +			dst[i] = *(src[i])
    +		}
    +	}
    +	return dst
    +}
    +
    +// Float64Map converts a string map of float64 values into a string
    +// map of float64 pointers
    +func Float64Map(src map[string]float64) map[string]*float64 {
    +	dst := make(map[string]*float64)
    +	for k, val := range src {
    +		v := val
    +		dst[k] = &v
    +	}
    +	return dst
    +}
    +
    +// Float64ValueMap converts a string map of float64 pointers into a string
    +// map of float64 values
    +func Float64ValueMap(src map[string]*float64) map[string]float64 {
    +	dst := make(map[string]float64)
    +	for k, val := range src {
    +		if val != nil {
    +			dst[k] = *val
    +		}
    +	}
    +	return dst
    +}
    +
    +// Time returns a pointer to the time.Time value passed in.
    +func Time(v time.Time) *time.Time {
    +	return &v
    +}
    +
    +// TimeValue returns the value of the time.Time pointer passed in or
    +// time.Time{} if the pointer is nil.
    +func TimeValue(v *time.Time) time.Time {
    +	if v != nil {
    +		return *v
    +	}
    +	return time.Time{}
    +}
    +
    +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
    +// The result is undefined if the Unix time cannot be represented by an int64.
    +// Which includes calling TimeUnixMilli on a zero Time is undefined.
    +//
    +// This utility is useful for service API's such as CloudWatch Logs which require
    +// their unix time values to be in milliseconds.
    +//
    +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
    +func TimeUnixMilli(t time.Time) int64 {
    +	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
    +}
    +
    +// TimeSlice converts a slice of time.Time values into a slice of
    +// time.Time pointers
    +func TimeSlice(src []time.Time) []*time.Time {
    +	dst := make([]*time.Time, len(src))
    +	for i := 0; i < len(src); i++ {
    +		dst[i] = &(src[i])
    +	}
    +	return dst
    +}
    +
    +// TimeValueSlice converts a slice of time.Time pointers into a slice of
    +// time.Time values
    +func TimeValueSlice(src []*time.Time) []time.Time {
    +	dst := make([]time.Time, len(src))
    +	for i := 0; i < len(src); i++ {
    +		if src[i] != nil {
    +			dst[i] = *(src[i])
    +		}
    +	}
    +	return dst
    +}
    +
    +// TimeMap converts a string map of time.Time values into a string
    +// map of time.Time pointers
    +func TimeMap(src map[string]time.Time) map[string]*time.Time {
    +	dst := make(map[string]*time.Time)
    +	for k, val := range src {
    +		v := val
    +		dst[k] = &v
    +	}
    +	return dst
    +}
    +
    +// TimeValueMap converts a string map of time.Time pointers into a string
    +// map of time.Time values
    +func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
    +	dst := make(map[string]time.Time)
    +	for k, val := range src {
    +		if val != nil {
    +			dst[k] = *val
    +		}
    +	}
    +	return dst
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
    new file mode 100644
    index 0000000..8e12f82
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
    @@ -0,0 +1,182 @@
    +package corehandlers
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"net/url"
    +	"regexp"
    +	"runtime"
    +	"strconv"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// Interface for matching types which also have a Len method.
    +type lener interface {
    +	Len() int
    +}
    +
    +// BuildContentLengthHandler builds the content length of a request based on the body,
    +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
    +// to determine request body length and no "Content-Length" was specified it will panic.
    +//
    +// The Content-Length will only be aded to the request if the length of the body
    +// is greater than 0. If the body is empty or the current `Content-Length`
    +// header is <= 0, the header will also be stripped.
    +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
    +	var length int64
    +
    +	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
    +		length, _ = strconv.ParseInt(slength, 10, 64)
    +	} else {
    +		switch body := r.Body.(type) {
    +		case nil:
    +			length = 0
    +		case lener:
    +			length = int64(body.Len())
    +		case io.Seeker:
    +			r.BodyStart, _ = body.Seek(0, 1)
    +			end, _ := body.Seek(0, 2)
    +			body.Seek(r.BodyStart, 0) // make sure to seek back to original location
    +			length = end - r.BodyStart
    +		default:
    +			panic("Cannot get length of body, must provide `ContentLength`")
    +		}
    +	}
    +
    +	if length > 0 {
    +		r.HTTPRequest.ContentLength = length
    +		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
    +	} else {
    +		r.HTTPRequest.ContentLength = 0
    +		r.HTTPRequest.Header.Del("Content-Length")
    +	}
    +}}
    +
    +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
    +var SDKVersionUserAgentHandler = request.NamedHandler{
    +	Name: "core.SDKVersionUserAgentHandler",
    +	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
    +		runtime.Version(), runtime.GOOS, runtime.GOARCH),
    +}
    +
    +var reStatusCode = regexp.MustCompile(`^(\d{3})`)
    +
    +// ValidateReqSigHandler is a request handler to ensure that the request's
    +// signature doesn't expire before it is sent. This can happen when a request
    +// is built and signed signficantly before it is sent. Or signficant delays
    +// occur whne retrying requests that would cause the signature to expire.
    +var ValidateReqSigHandler = request.NamedHandler{
    +	Name: "core.ValidateReqSigHandler",
    +	Fn: func(r *request.Request) {
    +		// Unsigned requests are not signed
    +		if r.Config.Credentials == credentials.AnonymousCredentials {
    +			return
    +		}
    +
    +		signedTime := r.Time
    +		if !r.LastSignedAt.IsZero() {
    +			signedTime = r.LastSignedAt
    +		}
    +
    +		// 10 minutes to allow for some clock skew/delays in transmission.
    +		// Would be improved with aws/aws-sdk-go#423
    +		if signedTime.Add(10 * time.Minute).After(time.Now()) {
    +			return
    +		}
    +
    +		fmt.Println("request expired, resigning")
    +		r.Sign()
    +	},
    +}
    +
    +// SendHandler is a request handler to send service request using HTTP client.
    +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
    +	var err error
    +	r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
    +	if err != nil {
    +		// Prevent leaking if an HTTPResponse was returned. Clean up
    +		// the body.
    +		if r.HTTPResponse != nil {
    +			r.HTTPResponse.Body.Close()
    +		}
    +		// Capture the case where url.Error is returned for error processing
    +		// response. e.g. 301 without location header comes back as string
    +		// error and r.HTTPResponse is nil. Other url redirect errors will
    +		// comeback in a similar method.
    +		if e, ok := err.(*url.Error); ok && e.Err != nil {
    +			if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
    +				code, _ := strconv.ParseInt(s[1], 10, 64)
    +				r.HTTPResponse = &http.Response{
    +					StatusCode: int(code),
    +					Status:     http.StatusText(int(code)),
    +					Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
    +				}
    +				return
    +			}
    +		}
    +		if r.HTTPResponse == nil {
    +			// Add a dummy request response object to ensure the HTTPResponse
    +			// value is consistent.
    +			r.HTTPResponse = &http.Response{
    +				StatusCode: int(0),
    +				Status:     http.StatusText(int(0)),
    +				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
    +			}
    +		}
    +		// Catch all other request errors.
    +		r.Error = awserr.New("RequestError", "send request failed", err)
    +		r.Retryable = aws.Bool(true) // network errors are retryable
    +	}
    +}}
    +
    +// ValidateResponseHandler is a request handler to validate service response.
    +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
    +	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
    +		// this may be replaced by an UnmarshalError handler
    +		r.Error = awserr.New("UnknownError", "unknown error", nil)
    +	}
    +}}
    +
    +// AfterRetryHandler performs final checks to determine if the request should
    +// be retried and how long to delay.
    +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
    +	// If one of the other handlers already set the retry state
    +	// we don't want to override it based on the service's state
    +	if r.Retryable == nil {
    +		r.Retryable = aws.Bool(r.ShouldRetry(r))
    +	}
    +
    +	if r.WillRetry() {
    +		r.RetryDelay = r.RetryRules(r)
    +		r.Config.SleepDelay(r.RetryDelay)
    +
    +		// when the expired token exception occurs the credentials
    +		// need to be expired locally so that the next request to
    +		// get credentials will trigger a credentials refresh.
    +		if r.IsErrorExpired() {
    +			r.Config.Credentials.Expire()
    +		}
    +
    +		r.RetryCount++
    +		r.Error = nil
    +	}
    +}}
    +
    +// ValidateEndpointHandler is a request handler to validate a request had the
    +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
    +// region is not valid.
    +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
    +	if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
    +		r.Error = aws.ErrMissingRegion
    +	} else if r.ClientInfo.Endpoint == "" {
    +		r.Error = aws.ErrMissingEndpoint
    +	}
    +}}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
    new file mode 100644
    index 0000000..7d50b15
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
    @@ -0,0 +1,17 @@
    +package corehandlers
    +
    +import "github.com/aws/aws-sdk-go/aws/request"
    +
    +// ValidateParametersHandler is a request handler to validate the input parameters.
    +// Validating parameters only has meaning if done prior to the request being sent.
    +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
    +	if !r.ParamsFilled() {
    +		return
    +	}
    +
    +	if v, ok := r.Params.(request.Validator); ok {
    +		if err := v.Validate(); err != nil {
    +			r.Error = err
    +		}
    +	}
    +}}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
    new file mode 100644
    index 0000000..6efc77b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
    @@ -0,0 +1,100 @@
    +package credentials
    +
    +import (
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +)
    +
    +var (
    +	// ErrNoValidProvidersFoundInChain Is returned when there are no valid
    +	// providers in the ChainProvider.
    +	//
    +	// This has been deprecated. For verbose error messaging set
    +	// aws.Config.CredentialsChainVerboseErrors to true
    +	//
    +	// @readonly
    +	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
    +		`no valid providers in chain. Deprecated. 
    +	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
    +		nil)
    +)
    +
    +// A ChainProvider will search for a provider which returns credentials
    +// and cache that provider until Retrieve is called again.
    +//
    +// The ChainProvider provides a way of chaining multiple providers together
    +// which will pick the first available using priority order of the Providers
    +// in the list.
    +//
    +// If none of the Providers retrieve valid credentials Value, ChainProvider's
    +// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
    +//
    +// If a Provider is found which returns valid credentials Value ChainProvider
    +// will cache that Provider for all calls to IsExpired(), until Retrieve is
    +// called again.
    +//
    +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
    +// In this example EnvProvider will first check if any credentials are available
    +// via the environment variables. If there are none ChainProvider will check
    +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
    +// does not return any credentials ChainProvider will return the error
    +// ErrNoValidProvidersFoundInChain
    +//
    +//     creds := NewChainCredentials(
    +//         []Provider{
    +//             &EnvProvider{},
    +//             &EC2RoleProvider{
    +//                 Client: ec2metadata.New(sess),
    +//             },
    +//         })
    +//
    +//     // Usage of ChainCredentials with aws.Config
    +//     svc := ec2.New(&aws.Config{Credentials: creds})
    +//
    +type ChainProvider struct {
    +	Providers     []Provider
    +	curr          Provider
    +	VerboseErrors bool
    +}
    +
    +// NewChainCredentials returns a pointer to a new Credentials object
    +// wrapping a chain of providers.
    +func NewChainCredentials(providers []Provider) *Credentials {
    +	return NewCredentials(&ChainProvider{
    +		Providers: append([]Provider{}, providers...),
    +	})
    +}
    +
    +// Retrieve returns the credentials value or error if no provider returned
    +// without error.
    +//
    +// If a provider is found it will be cached and any calls to IsExpired()
    +// will return the expired state of the cached provider.
    +func (c *ChainProvider) Retrieve() (Value, error) {
    +	var errs []error
    +	for _, p := range c.Providers {
    +		creds, err := p.Retrieve()
    +		if err == nil {
    +			c.curr = p
    +			return creds, nil
    +		}
    +		errs = append(errs, err)
    +	}
    +	c.curr = nil
    +
    +	var err error
    +	err = ErrNoValidProvidersFoundInChain
    +	if c.VerboseErrors {
    +		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
    +	}
    +	return Value{}, err
    +}
    +
    +// IsExpired will returned the expired state of the currently cached provider
    +// if there is one.  If there is no current provider, true will be returned.
    +func (c *ChainProvider) IsExpired() bool {
    +	if c.curr != nil {
    +		return c.curr.IsExpired()
    +	}
    +
    +	return true
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
    new file mode 100644
    index 0000000..7b8ebf5
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
    @@ -0,0 +1,223 @@
    +// Package credentials provides credential retrieval and management
    +//
    +// The Credentials is the primary method of getting access to and managing
    +// credentials Values. Using dependency injection retrieval of the credential
    +// values is handled by a object which satisfies the Provider interface.
    +//
    +// By default the Credentials.Get() will cache the successful result of a
    +// Provider's Retrieve() until Provider.IsExpired() returns true. At which
    +// point Credentials will call Provider's Retrieve() to get new credential Value.
    +//
    +// The Provider is responsible for determining when credentials Value have expired.
    +// It is also important to note that Credentials will always call Retrieve the
    +// first time Credentials.Get() is called.
    +//
    +// Example of using the environment variable credentials.
    +//
    +//     creds := NewEnvCredentials()
    +//
    +//     // Retrieve the credentials value
    +//     credValue, err := creds.Get()
    +//     if err != nil {
    +//         // handle error
    +//     }
    +//
    +// Example of forcing credentials to expire and be refreshed on the next Get().
    +// This may be helpful to proactively expire credentials and refresh them sooner
    +// than they would naturally expire on their own.
    +//
    +//     creds := NewCredentials(&EC2RoleProvider{})
    +//     creds.Expire()
    +//     credsValue, err := creds.Get()
    +//     // New credentials will be retrieved instead of from cache.
    +//
    +//
    +// Custom Provider
    +//
    +// Each Provider built into this package also provides a helper method to generate
    +// a Credentials pointer setup with the provider. To use a custom Provider just
    +// create a type which satisfies the Provider interface and pass it to the
    +// NewCredentials method.
    +//
    +//     type MyProvider struct{}
    +//     func (m *MyProvider) Retrieve() (Value, error) {...}
    +//     func (m *MyProvider) IsExpired() bool {...}
    +//
    +//     creds := NewCredentials(&MyProvider{})
    +//     credValue, err := creds.Get()
    +//
    +package credentials
    +
    +import (
    +	"sync"
    +	"time"
    +)
    +
    +// AnonymousCredentials is an empty Credential object that can be used as
    +// dummy placeholder credentials for requests that do not need signed.
    +//
    +// This Credentials can be used to configure a service to not sign requests
    +// when making service API calls. For example, when accessing public
    +// s3 buckets.
    +//
    +//     svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
    +//     // Access public S3 buckets.
    +//
    +// @readonly
    +var AnonymousCredentials = NewStaticCredentials("", "", "")
    +
    +// A Value is the AWS credentials value for individual credential fields.
    +type Value struct {
    +	// AWS Access key ID
    +	AccessKeyID string
    +
    +	// AWS Secret Access Key
    +	SecretAccessKey string
    +
    +	// AWS Session Token
    +	SessionToken string
    +
    +	// Provider used to get credentials
    +	ProviderName string
    +}
    +
    +// A Provider is the interface for any component which will provide credentials
    +// Value. A provider is required to manage its own Expired state, and what to
    +// be expired means.
    +//
    +// The Provider should not need to implement its own mutexes, because
    +// that will be managed by Credentials.
    +type Provider interface {
    +	// Refresh returns nil if it successfully retrieved the value.
    +	// Error is returned if the value were not obtainable, or empty.
    +	Retrieve() (Value, error)
    +
    +	// IsExpired returns if the credentials are no longer valid, and need
    +	// to be retrieved.
    +	IsExpired() bool
    +}
    +
    +// A Expiry provides shared expiration logic to be used by credentials
    +// providers to implement expiry functionality.
    +//
    +// The best method to use this struct is as an anonymous field within the
    +// provider's struct.
    +//
    +// Example:
    +//     type EC2RoleProvider struct {
    +//         Expiry
    +//         ...
    +//     }
    +type Expiry struct {
    +	// The date/time when to expire on
    +	expiration time.Time
    +
    +	// If set will be used by IsExpired to determine the current time.
    +	// Defaults to time.Now if CurrentTime is not set.  Available for testing
    +	// to be able to mock out the current time.
    +	CurrentTime func() time.Time
    +}
    +
    +// SetExpiration sets the expiration IsExpired will check when called.
    +//
    +// If window is greater than 0 the expiration time will be reduced by the
    +// window value.
    +//
    +// Using a window is helpful to trigger credentials to expire sooner than
    +// the expiration time given to ensure no requests are made with expired
    +// tokens.
    +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
    +	e.expiration = expiration
    +	if window > 0 {
    +		e.expiration = e.expiration.Add(-window)
    +	}
    +}
    +
    +// IsExpired returns if the credentials are expired.
    +func (e *Expiry) IsExpired() bool {
    +	if e.CurrentTime == nil {
    +		e.CurrentTime = time.Now
    +	}
    +	return e.expiration.Before(e.CurrentTime())
    +}
    +
    +// A Credentials provides synchronous safe retrieval of AWS credentials Value.
    +// Credentials will cache the credentials value until they expire. Once the value
    +// expires the next Get will attempt to retrieve valid credentials.
    +//
    +// Credentials is safe to use across multiple goroutines and will manage the
    +// synchronous state so the Providers do not need to implement their own
    +// synchronization.
    +//
    +// The first Credentials.Get() will always call Provider.Retrieve() to get the
    +// first instance of the credentials Value. All calls to Get() after that
    +// will return the cached credentials Value until IsExpired() returns true.
    +type Credentials struct {
    +	creds        Value
    +	forceRefresh bool
    +	m            sync.Mutex
    +
    +	provider Provider
    +}
    +
    +// NewCredentials returns a pointer to a new Credentials with the provider set.
    +func NewCredentials(provider Provider) *Credentials {
    +	return &Credentials{
    +		provider:     provider,
    +		forceRefresh: true,
    +	}
    +}
    +
    +// Get returns the credentials value, or error if the credentials Value failed
    +// to be retrieved.
    +//
    +// Will return the cached credentials Value if it has not expired. If the
    +// credentials Value has expired the Provider's Retrieve() will be called
    +// to refresh the credentials.
    +//
    +// If Credentials.Expire() was called the credentials Value will be force
    +// expired, and the next call to Get() will cause them to be refreshed.
    +func (c *Credentials) Get() (Value, error) {
    +	c.m.Lock()
    +	defer c.m.Unlock()
    +
    +	if c.isExpired() {
    +		creds, err := c.provider.Retrieve()
    +		if err != nil {
    +			return Value{}, err
    +		}
    +		c.creds = creds
    +		c.forceRefresh = false
    +	}
    +
    +	return c.creds, nil
    +}
    +
    +// Expire expires the credentials and forces them to be retrieved on the
    +// next call to Get().
    +//
    +// This will override the Provider's expired state, and force Credentials
    +// to call the Provider's Retrieve().
    +func (c *Credentials) Expire() {
    +	c.m.Lock()
    +	defer c.m.Unlock()
    +
    +	c.forceRefresh = true
    +}
    +
    +// IsExpired returns if the credentials are no longer valid, and need
    +// to be retrieved.
    +//
    +// If the Credentials were forced to be expired with Expire() this will
    +// reflect that override.
    +func (c *Credentials) IsExpired() bool {
    +	c.m.Lock()
    +	defer c.m.Unlock()
    +
    +	return c.isExpired()
    +}
    +
    +// isExpired helper method wrapping the definition of expired credentials.
    +func (c *Credentials) isExpired() bool {
    +	return c.forceRefresh || c.provider.IsExpired()
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
    new file mode 100644
    index 0000000..aa9d689
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
    @@ -0,0 +1,178 @@
    +package ec2rolecreds
    +
    +import (
    +	"bufio"
    +	"encoding/json"
    +	"fmt"
    +	"path"
    +	"strings"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/client"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    +)
    +
    +// ProviderName provides a name of EC2Role provider
    +const ProviderName = "EC2RoleProvider"
    +
    +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
    +// those credentials are expired.
    +//
    +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
    +// or ExpiryWindow
    +//
    +//     p := &ec2rolecreds.EC2RoleProvider{
    +//         // Pass in a custom timeout to be used when requesting
    +//         // IAM EC2 Role credentials.
    +//         Client: ec2metadata.New(sess, aws.Config{
    +//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
    +//         }),
    +//
    +//         // Do not use early expiry of credentials. If a non zero value is
    +//         // specified the credentials will be expired early
    +//         ExpiryWindow: 0,
    +//     }
    +type EC2RoleProvider struct {
    +	credentials.Expiry
    +
    +	// Required EC2Metadata client to use when connecting to EC2 metadata service.
    +	Client *ec2metadata.EC2Metadata
    +
    +	// ExpiryWindow will allow the credentials to trigger refreshing prior to
    +	// the credentials actually expiring. This is beneficial so race conditions
    +	// with expiring credentials do not cause request to fail unexpectedly
    +	// due to ExpiredTokenException exceptions.
    +	//
    +	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
    +	// 10 seconds before the credentials are actually expired.
    +	//
    +	// If ExpiryWindow is 0 or less it will be ignored.
    +	ExpiryWindow time.Duration
    +}
    +
    +// NewCredentials returns a pointer to a new Credentials object wrapping
    +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
    +// The ConfigProvider is satisfied by the session.Session type.
    +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
    +	p := &EC2RoleProvider{
    +		Client: ec2metadata.New(c),
    +	}
    +
    +	for _, option := range options {
    +		option(p)
    +	}
    +
    +	return credentials.NewCredentials(p)
    +}
    +
    +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
    +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
    +// metadata service.
    +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
    +	p := &EC2RoleProvider{
    +		Client: client,
    +	}
    +
    +	for _, option := range options {
    +		option(p)
    +	}
    +
    +	return credentials.NewCredentials(p)
    +}
    +
    +// Retrieve retrieves credentials from the EC2 service.
    +// Error will be returned if the request fails, or unable to extract
    +// the desired credentials.
    +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
    +	credsList, err := requestCredList(m.Client)
    +	if err != nil {
    +		return credentials.Value{ProviderName: ProviderName}, err
    +	}
    +
    +	if len(credsList) == 0 {
    +		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
    +	}
    +	credsName := credsList[0]
    +
    +	roleCreds, err := requestCred(m.Client, credsName)
    +	if err != nil {
    +		return credentials.Value{ProviderName: ProviderName}, err
    +	}
    +
    +	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
    +
    +	return credentials.Value{
    +		AccessKeyID:     roleCreds.AccessKeyID,
    +		SecretAccessKey: roleCreds.SecretAccessKey,
    +		SessionToken:    roleCreds.Token,
    +		ProviderName:    ProviderName,
    +	}, nil
    +}
    +
    +// A ec2RoleCredRespBody provides the shape for unmarshalling credential
    +// request responses.
    +type ec2RoleCredRespBody struct {
    +	// Success State
    +	Expiration      time.Time
    +	AccessKeyID     string
    +	SecretAccessKey string
    +	Token           string
    +
    +	// Error state
    +	Code    string
    +	Message string
    +}
    +
    +const iamSecurityCredsPath = "/iam/security-credentials"
    +
    +// requestCredList requests a list of credentials from the EC2 service.
    +// If there are no credentials, or there is an error making or receiving the request
    +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
    +	resp, err := client.GetMetadata(iamSecurityCredsPath)
    +	if err != nil {
    +		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
    +	}
    +
    +	credsList := []string{}
    +	s := bufio.NewScanner(strings.NewReader(resp))
    +	for s.Scan() {
    +		credsList = append(credsList, s.Text())
    +	}
    +
    +	if err := s.Err(); err != nil {
    +		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
    +	}
    +
    +	return credsList, nil
    +}
    +
+// requestCred requests the credentials for a specific credentials name from the EC2 service.
    +//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
    +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
    +	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
    +	if err != nil {
    +		return ec2RoleCredRespBody{},
    +			awserr.New("EC2RoleRequestError",
    +				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
    +				err)
    +	}
    +
    +	respCreds := ec2RoleCredRespBody{}
    +	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
    +		return ec2RoleCredRespBody{},
    +			awserr.New("SerializationError",
    +				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
    +				err)
    +	}
    +
    +	if respCreds.Code != "Success" {
    +		// If an error code was returned something failed requesting the role.
    +		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
    +	}
    +
    +	return respCreds, nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
    new file mode 100644
    index 0000000..a4cec5c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
    @@ -0,0 +1,191 @@
    +// Package endpointcreds provides support for retrieving credentials from an
    +// arbitrary HTTP endpoint.
    +//
    +// The credentials endpoint Provider can receive both static and refreshable
    +// credentials that will expire. Credentials are static when an "Expiration"
    +// value is not provided in the endpoint's response.
    +//
    +// Static credentials will never expire once they have been retrieved. The format
    +// of the static credentials response:
    +//    {
    +//        "AccessKeyId" : "MUA...",
    +//        "SecretAccessKey" : "/7PC5om....",
    +//    }
    +//
    +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
    +// value in the response. The format of the refreshable credentials response:
    +//    {
    +//        "AccessKeyId" : "MUA...",
    +//        "SecretAccessKey" : "/7PC5om....",
    +//        "Token" : "AQoDY....=",
    +//        "Expiration" : "2016-02-25T06:03:31Z"
    +//    }
    +//
    +// Errors should be returned in the following format and only returned with 400
    +// or 500 HTTP status codes.
    +//    {
    +//        "code": "ErrorCode",
    +//        "message": "Helpful error message."
    +//    }
    +package endpointcreds
    +
    +import (
    +	"encoding/json"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/client"
    +	"github.com/aws/aws-sdk-go/aws/client/metadata"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// ProviderName is the name of the credentials provider.
    +const ProviderName = `CredentialsEndpointProvider`
    +
    +// Provider satisfies the credentials.Provider interface, and is a client to
    +// retrieve credentials from an arbitrary endpoint.
    +type Provider struct {
    +	staticCreds bool
    +	credentials.Expiry
    +
    +	// Requires a AWS Client to make HTTP requests to the endpoint with.
    +	// the Endpoint the request will be made to is provided by the aws.Config's
    +	// Endpoint value.
    +	Client *client.Client
    +
    +	// ExpiryWindow will allow the credentials to trigger refreshing prior to
    +	// the credentials actually expiring. This is beneficial so race conditions
    +	// with expiring credentials do not cause request to fail unexpectedly
    +	// due to ExpiredTokenException exceptions.
    +	//
    +	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
    +	// 10 seconds before the credentials are actually expired.
    +	//
    +	// If ExpiryWindow is 0 or less it will be ignored.
    +	ExpiryWindow time.Duration
    +}
    +
    +// NewProviderClient returns a credentials Provider for retrieving AWS credentials
    +// from arbitrary endpoint.
    +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
    +	p := &Provider{
    +		Client: client.New(
    +			cfg,
    +			metadata.ClientInfo{
    +				ServiceName: "CredentialsEndpoint",
    +				Endpoint:    endpoint,
    +			},
    +			handlers,
    +		),
    +	}
    +
    +	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
    +	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
    +	p.Client.Handlers.Validate.Clear()
    +	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
    +
    +	for _, option := range options {
    +		option(p)
    +	}
    +
    +	return p
    +}
    +
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the credentials.
    +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
    +	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
    +}
    +
    +// IsExpired returns true if the credentials retrieved are expired, or not yet
    +// retrieved.
    +func (p *Provider) IsExpired() bool {
    +	if p.staticCreds {
    +		return false
    +	}
    +	return p.Expiry.IsExpired()
    +}
    +
    +// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
    +func (p *Provider) Retrieve() (credentials.Value, error) {
    +	resp, err := p.getCredentials()
    +	if err != nil {
    +		return credentials.Value{ProviderName: ProviderName},
    +			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
    +	}
    +
    +	if resp.Expiration != nil {
    +		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
    +	} else {
    +		p.staticCreds = true
    +	}
    +
    +	return credentials.Value{
    +		AccessKeyID:     resp.AccessKeyID,
    +		SecretAccessKey: resp.SecretAccessKey,
    +		SessionToken:    resp.Token,
    +		ProviderName:    ProviderName,
    +	}, nil
    +}
    +
    +type getCredentialsOutput struct {
    +	Expiration      *time.Time
    +	AccessKeyID     string
    +	SecretAccessKey string
    +	Token           string
    +}
    +
    +type errorOutput struct {
    +	Code    string `json:"code"`
    +	Message string `json:"message"`
    +}
    +
    +func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
    +	op := &request.Operation{
    +		Name:       "GetCredentials",
    +		HTTPMethod: "GET",
    +	}
    +
    +	out := &getCredentialsOutput{}
    +	req := p.Client.NewRequest(op, nil, out)
    +	req.HTTPRequest.Header.Set("Accept", "application/json")
    +
    +	return out, req.Send()
    +}
    +
    +func validateEndpointHandler(r *request.Request) {
    +	if len(r.ClientInfo.Endpoint) == 0 {
    +		r.Error = aws.ErrMissingEndpoint
    +	}
    +}
    +
    +func unmarshalHandler(r *request.Request) {
    +	defer r.HTTPResponse.Body.Close()
    +
    +	out := r.Data.(*getCredentialsOutput)
    +	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
    +		r.Error = awserr.New("SerializationError",
    +			"failed to decode endpoint credentials",
    +			err,
    +		)
    +	}
    +}
    +
    +func unmarshalError(r *request.Request) {
    +	defer r.HTTPResponse.Body.Close()
    +
    +	var errOut errorOutput
    +	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
    +		r.Error = awserr.New("SerializationError",
    +			"failed to decode endpoint credentials",
    +			err,
    +		)
    +	}
    +
    +	// Response body format is not consistent between metadata endpoints.
    +	// Grab the error message as a string and include that as the source error
    +	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
    new file mode 100644
    index 0000000..96655bc
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
    @@ -0,0 +1,77 @@
    +package credentials
    +
    +import (
    +	"os"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +)
    +
    +// EnvProviderName provides a name of Env provider
    +const EnvProviderName = "EnvProvider"
    +
    +var (
    +	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
    +	// found in the process's environment.
    +	//
    +	// @readonly
    +	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
    +
    +	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
    +	// can't be found in the process's environment.
    +	//
    +	// @readonly
    +	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
    +)
    +
    +// A EnvProvider retrieves credentials from the environment variables of the
    +// running process. Environment credentials never expire.
    +//
    +// Environment variables used:
    +//
    +// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
    +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
    +type EnvProvider struct {
    +	retrieved bool
    +}
    +
    +// NewEnvCredentials returns a pointer to a new Credentials object
    +// wrapping the environment variable provider.
    +func NewEnvCredentials() *Credentials {
    +	return NewCredentials(&EnvProvider{})
    +}
    +
    +// Retrieve retrieves the keys from the environment.
    +func (e *EnvProvider) Retrieve() (Value, error) {
    +	e.retrieved = false
    +
    +	id := os.Getenv("AWS_ACCESS_KEY_ID")
    +	if id == "" {
    +		id = os.Getenv("AWS_ACCESS_KEY")
    +	}
    +
    +	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
    +	if secret == "" {
    +		secret = os.Getenv("AWS_SECRET_KEY")
    +	}
    +
    +	if id == "" {
    +		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
    +	}
    +
    +	if secret == "" {
    +		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
    +	}
    +
    +	e.retrieved = true
    +	return Value{
    +		AccessKeyID:     id,
    +		SecretAccessKey: secret,
    +		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
    +		ProviderName:    EnvProviderName,
    +	}, nil
    +}
    +
    +// IsExpired returns if the credentials have been retrieved.
    +func (e *EnvProvider) IsExpired() bool {
    +	return !e.retrieved
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
    new file mode 100644
    index 0000000..7fc91d9
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
    @@ -0,0 +1,12 @@
    +[default]
    +aws_access_key_id = accessKey
    +aws_secret_access_key = secret
    +aws_session_token = token
    +
    +[no_token]
    +aws_access_key_id = accessKey
    +aws_secret_access_key = secret
    +
    +[with_colon]
    +aws_access_key_id: accessKey
    +aws_secret_access_key: secret
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
    new file mode 100644
    index 0000000..7fb7cbf
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
    @@ -0,0 +1,151 @@
    +package credentials
    +
    +import (
    +	"fmt"
    +	"os"
    +	"path/filepath"
    +
    +	"github.com/go-ini/ini"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +)
    +
    +// SharedCredsProviderName provides a name of SharedCreds provider
    +const SharedCredsProviderName = "SharedCredentialsProvider"
    +
    +var (
    +	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
    +	//
    +	// @readonly
    +	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
    +)
    +
    +// A SharedCredentialsProvider retrieves credentials from the current user's home
    +// directory, and keeps track if those credentials are expired.
    +//
    +// Profile ini file example: $HOME/.aws/credentials
    +type SharedCredentialsProvider struct {
    +	// Path to the shared credentials file.
    +	//
    +	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
    +	// env value is empty will default to current user's home directory.
    +	// Linux/OSX: "$HOME/.aws/credentials"
    +	// Windows:   "%USERPROFILE%\.aws\credentials"
    +	Filename string
    +
    +	// AWS Profile to extract credentials from the shared credentials file. If empty
    +	// will default to environment variable "AWS_PROFILE" or "default" if
    +	// environment variable is also not set.
    +	Profile string
    +
    +	// retrieved states if the credentials have been successfully retrieved.
    +	retrieved bool
    +}
    +
    +// NewSharedCredentials returns a pointer to a new Credentials object
    +// wrapping the Profile file provider.
    +func NewSharedCredentials(filename, profile string) *Credentials {
    +	return NewCredentials(&SharedCredentialsProvider{
    +		Filename: filename,
    +		Profile:  profile,
    +	})
    +}
    +
    +// Retrieve reads and extracts the shared credentials from the current
    +// users home directory.
    +func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
    +	p.retrieved = false
    +
    +	filename, err := p.filename()
    +	if err != nil {
    +		return Value{ProviderName: SharedCredsProviderName}, err
    +	}
    +
    +	creds, err := loadProfile(filename, p.profile())
    +	if err != nil {
    +		return Value{ProviderName: SharedCredsProviderName}, err
    +	}
    +
    +	p.retrieved = true
    +	return creds, nil
    +}
    +
    +// IsExpired returns if the shared credentials have expired.
    +func (p *SharedCredentialsProvider) IsExpired() bool {
    +	return !p.retrieved
    +}
    +
+// loadProfile loads from the file pointed to by shared credentials filename for profile.
    +// The credentials retrieved from the profile will be returned or error. Error will be
    +// returned if it fails to read from the file, or the data is invalid.
    +func loadProfile(filename, profile string) (Value, error) {
    +	config, err := ini.Load(filename)
    +	if err != nil {
    +		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
    +	}
    +	iniProfile, err := config.GetSection(profile)
    +	if err != nil {
    +		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
    +	}
    +
    +	id, err := iniProfile.GetKey("aws_access_key_id")
    +	if err != nil {
    +		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
    +			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
    +			err)
    +	}
    +
    +	secret, err := iniProfile.GetKey("aws_secret_access_key")
    +	if err != nil {
    +		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
    +			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
    +			nil)
    +	}
    +
    +	// Default to empty string if not found
    +	token := iniProfile.Key("aws_session_token")
    +
    +	return Value{
    +		AccessKeyID:     id.String(),
    +		SecretAccessKey: secret.String(),
    +		SessionToken:    token.String(),
    +		ProviderName:    SharedCredsProviderName,
    +	}, nil
    +}
    +
    +// filename returns the filename to use to read AWS shared credentials.
    +//
    +// Will return an error if the user's home directory path cannot be found.
    +func (p *SharedCredentialsProvider) filename() (string, error) {
    +	if p.Filename == "" {
    +		if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
    +			return p.Filename, nil
    +		}
    +
    +		homeDir := os.Getenv("HOME") // *nix
    +		if homeDir == "" {           // Windows
    +			homeDir = os.Getenv("USERPROFILE")
    +		}
    +		if homeDir == "" {
    +			return "", ErrSharedCredentialsHomeNotFound
    +		}
    +
    +		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
    +	}
    +
    +	return p.Filename, nil
    +}
    +
    +// profile returns the AWS shared credentials profile.  If empty will read
    +// environment variable "AWS_PROFILE". If that is not set profile will
    +// return "default".
    +func (p *SharedCredentialsProvider) profile() string {
    +	if p.Profile == "" {
    +		p.Profile = os.Getenv("AWS_PROFILE")
    +	}
    +	if p.Profile == "" {
    +		p.Profile = "default"
    +	}
    +
    +	return p.Profile
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
    new file mode 100644
    index 0000000..4f5dab3
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
    @@ -0,0 +1,57 @@
    +package credentials
    +
    +import (
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +)
    +
    +// StaticProviderName provides a name of Static provider
    +const StaticProviderName = "StaticProvider"
    +
    +var (
    +	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
    +	//
    +	// @readonly
    +	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
    +)
    +
    +// A StaticProvider is a set of credentials which are set programmatically,
    +// and will never expire.
    +type StaticProvider struct {
    +	Value
    +}
    +
    +// NewStaticCredentials returns a pointer to a new Credentials object
    +// wrapping a static credentials value provider.
    +func NewStaticCredentials(id, secret, token string) *Credentials {
    +	return NewCredentials(&StaticProvider{Value: Value{
    +		AccessKeyID:     id,
    +		SecretAccessKey: secret,
    +		SessionToken:    token,
    +	}})
    +}
    +
    +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
    +// but takes the creds Value instead of individual fields
    +func NewStaticCredentialsFromCreds(creds Value) *Credentials {
    +	return NewCredentials(&StaticProvider{Value: creds})
    +}
    +
    +// Retrieve returns the credentials or error if the credentials are invalid.
    +func (s *StaticProvider) Retrieve() (Value, error) {
    +	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
    +		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
    +	}
    +
    +	if len(s.Value.ProviderName) == 0 {
    +		s.Value.ProviderName = StaticProviderName
    +	}
    +	return s.Value, nil
    +}
    +
    +// IsExpired returns if the credentials are expired.
    +//
+// For StaticProvider, the credentials never expire.
    +func (s *StaticProvider) IsExpired() bool {
    +	return false
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
    new file mode 100644
    index 0000000..30c847a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
    @@ -0,0 +1,161 @@
    +// Package stscreds are credential Providers to retrieve STS AWS credentials.
    +//
    +// STS provides multiple ways to retrieve credentials which can be used when making
    +// future AWS service API operation calls.
    +package stscreds
    +
    +import (
    +	"fmt"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/client"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/aws/aws-sdk-go/service/sts"
    +)
    +
    +// ProviderName provides a name of AssumeRole provider
    +const ProviderName = "AssumeRoleProvider"
    +
    +// AssumeRoler represents the minimal subset of the STS client API used by this provider.
    +type AssumeRoler interface {
    +	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
    +}
    +
    +// DefaultDuration is the default amount of time in minutes that the credentials
    +// will be valid for.
    +var DefaultDuration = time.Duration(15) * time.Minute
    +
    +// AssumeRoleProvider retrieves temporary credentials from the STS service, and
    +// keeps track of their expiration time. This provider must be used explicitly,
    +// as it is not included in the credentials chain.
    +type AssumeRoleProvider struct {
    +	credentials.Expiry
    +
    +	// STS client to make assume role request with.
    +	Client AssumeRoler
    +
    +	// Role to be assumed.
    +	RoleARN string
    +
    +	// Session name, if you wish to reuse the credentials elsewhere.
    +	RoleSessionName string
    +
    +	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
    +	Duration time.Duration
    +
    +	// Optional ExternalID to pass along, defaults to nil if not set.
    +	ExternalID *string
    +
    +	// The policy plain text must be 2048 bytes or shorter. However, an internal
    +	// conversion compresses it into a packed binary format with a separate limit.
    +	// The PackedPolicySize response element indicates by percentage how close to
    +	// the upper size limit the policy is, with 100% equaling the maximum allowed
    +	// size.
    +	Policy *string
    +
    +	// The identification number of the MFA device that is associated with the user
    +	// who is making the AssumeRole call. Specify this value if the trust policy
    +	// of the role being assumed includes a condition that requires MFA authentication.
    +	// The value is either the serial number for a hardware device (such as GAHT12345678)
    +	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
    +	SerialNumber *string
    +
    +	// The value provided by the MFA device, if the trust policy of the role being
    +	// assumed requires MFA (that is, if the policy includes a condition that tests
    +	// for MFA). If the role being assumed requires MFA and if the TokenCode value
    +	// is missing or expired, the AssumeRole call returns an "access denied" error.
    +	TokenCode *string
    +
    +	// ExpiryWindow will allow the credentials to trigger refreshing prior to
    +	// the credentials actually expiring. This is beneficial so race conditions
    +	// with expiring credentials do not cause request to fail unexpectedly
    +	// due to ExpiredTokenException exceptions.
    +	//
    +	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
    +	// 10 seconds before the credentials are actually expired.
    +	//
    +	// If ExpiryWindow is 0 or less it will be ignored.
    +	ExpiryWindow time.Duration
    +}
    +
    +// NewCredentials returns a pointer to a new Credentials object wrapping the
    +// AssumeRoleProvider. The credentials will expire every 15 minutes and the
    +// role will be named after a nanosecond timestamp of this operation.
    +//
    +// Takes a Config provider to create the STS client. The ConfigProvider is
    +// satisfied by the session.Session type.
    +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
    +	p := &AssumeRoleProvider{
    +		Client:   sts.New(c),
    +		RoleARN:  roleARN,
    +		Duration: DefaultDuration,
    +	}
    +
    +	for _, option := range options {
    +		option(p)
    +	}
    +
    +	return credentials.NewCredentials(p)
    +}
    +
    +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
    +// AssumeRoleProvider. The credentials will expire every 15 minutes and the
    +// role will be named after a nanosecond timestamp of this operation.
    +//
+// Takes an AssumeRoler which can be satisfied by the STS client.
    +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
    +	p := &AssumeRoleProvider{
    +		Client:   svc,
    +		RoleARN:  roleARN,
    +		Duration: DefaultDuration,
    +	}
    +
    +	for _, option := range options {
    +		option(p)
    +	}
    +
    +	return credentials.NewCredentials(p)
    +}
    +
    +// Retrieve generates a new set of temporary credentials using STS.
    +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
    +
    +	// Apply defaults where parameters are not set.
    +	if p.RoleSessionName == "" {
    +		// Try to work out a role name that will hopefully end up unique.
    +		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
    +	}
    +	if p.Duration == 0 {
    +		// Expire as often as AWS permits.
    +		p.Duration = DefaultDuration
    +	}
    +	input := &sts.AssumeRoleInput{
    +		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
    +		RoleArn:         aws.String(p.RoleARN),
    +		RoleSessionName: aws.String(p.RoleSessionName),
    +		ExternalId:      p.ExternalID,
    +	}
    +	if p.Policy != nil {
    +		input.Policy = p.Policy
    +	}
    +	if p.SerialNumber != nil && p.TokenCode != nil {
    +		input.SerialNumber = p.SerialNumber
    +		input.TokenCode = p.TokenCode
    +	}
    +	roleOutput, err := p.Client.AssumeRole(input)
    +
    +	if err != nil {
    +		return credentials.Value{ProviderName: ProviderName}, err
    +	}
    +
    +	// We will proactively generate new credentials before they expire.
    +	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
    +
    +	return credentials.Value{
    +		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
    +		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
    +		SessionToken:    *roleOutput.Credentials.SessionToken,
    +		ProviderName:    ProviderName,
    +	}, nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
    new file mode 100644
    index 0000000..8dbbf67
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
    @@ -0,0 +1,130 @@
    +// Package defaults is a collection of helpers to retrieve the SDK's default
    +// configuration and handlers.
    +//
    +// Generally this package shouldn't be used directly, but session.Session
    +// instead. This package is useful when you need to reset the defaults
    +// of a session or service client to the SDK defaults before setting
    +// additional parameters.
    +package defaults
    +
    +import (
    +	"fmt"
    +	"net/http"
    +	"os"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/corehandlers"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    +	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
    +	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/endpoints"
    +)
    +
    +// A Defaults provides a collection of default values for SDK clients.
    +type Defaults struct {
    +	Config   *aws.Config
    +	Handlers request.Handlers
    +}
    +
    +// Get returns the SDK's default values with Config and handlers pre-configured.
    +func Get() Defaults {
    +	cfg := Config()
    +	handlers := Handlers()
    +	cfg.Credentials = CredChain(cfg, handlers)
    +
    +	return Defaults{
    +		Config:   cfg,
    +		Handlers: handlers,
    +	}
    +}
    +
    +// Config returns the default configuration without credentials.
    +// To retrieve a config with credentials also included use
    +// `defaults.Get().Config` instead.
    +//
    +// Generally you shouldn't need to use this method directly, but
    +// is available if you need to reset the configuration of an
    +// existing service client or session.
    +func Config() *aws.Config {
    +	return aws.NewConfig().
    +		WithCredentials(credentials.AnonymousCredentials).
    +		WithRegion(os.Getenv("AWS_REGION")).
    +		WithHTTPClient(http.DefaultClient).
    +		WithMaxRetries(aws.UseServiceDefaultRetries).
    +		WithLogger(aws.NewDefaultLogger()).
    +		WithLogLevel(aws.LogOff).
    +		WithSleepDelay(time.Sleep)
    +}
    +
    +// Handlers returns the default request handlers.
    +//
    +// Generally you shouldn't need to use this method directly, but
    +// is available if you need to reset the request handlers of an
    +// existing service client or session.
    +func Handlers() request.Handlers {
    +	var handlers request.Handlers
    +
    +	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
    +	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
    +	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
    +	handlers.Build.AfterEachFn = request.HandlerListStopOnError
    +	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
    +	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
    +	handlers.Send.PushBackNamed(corehandlers.SendHandler)
    +	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
    +	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
    +
    +	return handlers
    +}
    +
    +// CredChain returns the default credential chain.
    +//
    +// Generally you shouldn't need to use this method directly, but
    +// is available if you need to reset the credentials of an
    +// existing service client or session's Config.
    +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
    +	return credentials.NewCredentials(&credentials.ChainProvider{
    +		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
    +		Providers: []credentials.Provider{
    +			&credentials.EnvProvider{},
    +			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
    +			RemoteCredProvider(*cfg, handlers),
    +		},
    +	})
    +}
    +
+// RemoteCredProvider returns a credentials provider for the default remote
    +// endpoints such as EC2 or ECS Roles.
    +func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
    +	ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
    +
    +	if len(ecsCredURI) > 0 {
    +		return ecsCredProvider(cfg, handlers, ecsCredURI)
    +	}
    +
    +	return ec2RoleProvider(cfg, handlers)
    +}
    +
    +func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
    +	const host = `169.254.170.2`
    +
    +	return endpointcreds.NewProviderClient(cfg, handlers,
    +		fmt.Sprintf("http://%s%s", host, uri),
    +		func(p *endpointcreds.Provider) {
    +			p.ExpiryWindow = 5 * time.Minute
    +		},
    +	)
    +}
    +
    +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
    +	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
    +		aws.StringValue(cfg.Region), true, false)
    +
    +	return &ec2rolecreds.EC2RoleProvider{
    +		Client:       ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
    +		ExpiryWindow: 5 * time.Minute,
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
    new file mode 100644
    index 0000000..e5755d1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
    @@ -0,0 +1,162 @@
    +package ec2metadata
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"path"
    +	"strings"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
    +// error if the request failed.
    +func (c *EC2Metadata) GetMetadata(p string) (string, error) {
    +	op := &request.Operation{
    +		Name:       "GetMetadata",
    +		HTTPMethod: "GET",
    +		HTTPPath:   path.Join("/", "meta-data", p),
    +	}
    +
    +	output := &metadataOutput{}
    +	req := c.NewRequest(op, nil, output)
    +
    +	return output.Content, req.Send()
    +}
    +
    +// GetUserData returns the userdata that was configured for the service. If
    +// there is no user-data setup for the EC2 instance a "NotFoundError" error
    +// code will be returned.
    +func (c *EC2Metadata) GetUserData() (string, error) {
    +	op := &request.Operation{
    +		Name:       "GetUserData",
    +		HTTPMethod: "GET",
    +		HTTPPath:   path.Join("/", "user-data"),
    +	}
    +
    +	output := &metadataOutput{}
    +	req := c.NewRequest(op, nil, output)
    +	req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
    +		if r.HTTPResponse.StatusCode == http.StatusNotFound {
    +			r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
    +		}
    +	})
    +
    +	return output.Content, req.Send()
    +}
    +
    +// GetDynamicData uses the path provided to request information from the EC2
    +// instance metadata service for dynamic data. The content will be returned
    +// as a string, or error if the request failed.
    +func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
    +	op := &request.Operation{
    +		Name:       "GetDynamicData",
    +		HTTPMethod: "GET",
    +		HTTPPath:   path.Join("/", "dynamic", p),
    +	}
    +
    +	output := &metadataOutput{}
    +	req := c.NewRequest(op, nil, output)
    +
    +	return output.Content, req.Send()
    +}
    +
    +// GetInstanceIdentityDocument retrieves an identity document describing an
    +// instance. Error is returned if the request fails or is unable to parse
    +// the response.
    +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
    +	resp, err := c.GetDynamicData("instance-identity/document")
    +	if err != nil {
    +		return EC2InstanceIdentityDocument{},
    +			awserr.New("EC2MetadataRequestError",
    +				"failed to get EC2 instance identity document", err)
    +	}
    +
    +	doc := EC2InstanceIdentityDocument{}
    +	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
    +		return EC2InstanceIdentityDocument{},
    +			awserr.New("SerializationError",
    +				"failed to decode EC2 instance identity document", err)
    +	}
    +
    +	return doc, nil
    +}
    +
    +// IAMInfo retrieves IAM info from the metadata API
    +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
    +	resp, err := c.GetMetadata("iam/info")
    +	if err != nil {
    +		return EC2IAMInfo{},
    +			awserr.New("EC2MetadataRequestError",
    +				"failed to get EC2 IAM info", err)
    +	}
    +
    +	info := EC2IAMInfo{}
    +	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
    +		return EC2IAMInfo{},
    +			awserr.New("SerializationError",
    +				"failed to decode EC2 IAM info", err)
    +	}
    +
    +	if info.Code != "Success" {
    +		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
    +		return EC2IAMInfo{},
    +			awserr.New("EC2MetadataError", errMsg, nil)
    +	}
    +
    +	return info, nil
    +}
    +
    +// Region returns the region the instance is running in.
    +func (c *EC2Metadata) Region() (string, error) {
    +	resp, err := c.GetMetadata("placement/availability-zone")
    +	if err != nil {
    +		return "", err
    +	}
    +
    +	// returns region without the suffix. Eg: us-west-2a becomes us-west-2
    +	return resp[:len(resp)-1], nil
    +}
    +
    +// Available returns if the application has access to the EC2 Metadata service.
    +// Can be used to determine if application is running within an EC2 Instance and
    +// the metadata service is available.
    +func (c *EC2Metadata) Available() bool {
    +	if _, err := c.GetMetadata("instance-id"); err != nil {
    +		return false
    +	}
    +
    +	return true
    +}
    +
    +// An EC2IAMInfo provides the shape for unmarshalling
    +// an IAM info from the metadata API
    +type EC2IAMInfo struct {
    +	Code               string
    +	LastUpdated        time.Time
    +	InstanceProfileArn string
    +	InstanceProfileID  string
    +}
    +
    +// An EC2InstanceIdentityDocument provides the shape for unmarshalling
    +// an instance identity document
    +type EC2InstanceIdentityDocument struct {
    +	DevpayProductCodes []string  `json:"devpayProductCodes"`
    +	AvailabilityZone   string    `json:"availabilityZone"`
    +	PrivateIP          string    `json:"privateIp"`
    +	Version            string    `json:"version"`
    +	Region             string    `json:"region"`
    +	InstanceID         string    `json:"instanceId"`
    +	BillingProducts    []string  `json:"billingProducts"`
    +	InstanceType       string    `json:"instanceType"`
    +	AccountID          string    `json:"accountId"`
    +	PendingTime        time.Time `json:"pendingTime"`
    +	ImageID            string    `json:"imageId"`
    +	KernelID           string    `json:"kernelId"`
    +	RamdiskID          string    `json:"ramdiskId"`
    +	Architecture       string    `json:"architecture"`
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
    new file mode 100644
    index 0000000..5b4379d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
    @@ -0,0 +1,124 @@
    +// Package ec2metadata provides the client for making API calls to the
    +// EC2 Metadata service.
    +package ec2metadata
    +
    +import (
    +	"bytes"
    +	"errors"
    +	"io"
    +	"net/http"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/client"
    +	"github.com/aws/aws-sdk-go/aws/client/metadata"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// ServiceName is the name of the service.
    +const ServiceName = "ec2metadata"
    +
+// An EC2Metadata is an EC2 Metadata service Client.
    +type EC2Metadata struct {
    +	*client.Client
    +}
    +
    +// New creates a new instance of the EC2Metadata client with a session.
    +// This client is safe to use across multiple goroutines.
    +//
    +//
    +// Example:
    +//     // Create a EC2Metadata client from just a session.
    +//     svc := ec2metadata.New(mySession)
    +//
    +//     // Create a EC2Metadata client with additional configuration
    +//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
    +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
    +	c := p.ClientConfig(ServiceName, cfgs...)
    +	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
    +}
    +
    +// NewClient returns a new EC2Metadata client. Should be used to create
    +// a client when not using a session. Generally using just New with a session
    +// is preferred.
    +//
    +// If an unmodified HTTP client is provided from the stdlib default, or no client
    +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
+// To disable this set Config.EC2MetadataDisableTimeoutOverride to true. Enabled by default.
    +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
    +	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
    +		// If the http client is unmodified and this feature is not disabled
    +		// set custom timeouts for EC2Metadata requests.
    +		cfg.HTTPClient = &http.Client{
    +			// use a shorter timeout than default because the metadata
    +			// service is local if it is running, and to fail faster
    +			// if not running on an ec2 instance.
    +			Timeout: 5 * time.Second,
    +		}
    +	}
    +
    +	svc := &EC2Metadata{
    +		Client: client.New(
    +			cfg,
    +			metadata.ClientInfo{
    +				ServiceName: ServiceName,
    +				Endpoint:    endpoint,
    +				APIVersion:  "latest",
    +			},
    +			handlers,
    +		),
    +	}
    +
    +	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
    +	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
    +	svc.Handlers.Validate.Clear()
    +	svc.Handlers.Validate.PushBack(validateEndpointHandler)
    +
    +	// Add additional options to the service config
    +	for _, option := range opts {
    +		option(svc.Client)
    +	}
    +
    +	return svc
    +}
    +
    +func httpClientZero(c *http.Client) bool {
    +	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
    +}
    +
    +type metadataOutput struct {
    +	Content string
    +}
    +
    +func unmarshalHandler(r *request.Request) {
    +	defer r.HTTPResponse.Body.Close()
    +	b := &bytes.Buffer{}
    +	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
    +		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
    +		return
    +	}
    +
    +	if data, ok := r.Data.(*metadataOutput); ok {
    +		data.Content = b.String()
    +	}
    +}
    +
    +func unmarshalError(r *request.Request) {
    +	defer r.HTTPResponse.Body.Close()
    +	b := &bytes.Buffer{}
    +	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
    +		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
    +		return
    +	}
    +
    +	// Response body format is not consistent between metadata endpoints.
    +	// Grab the error message as a string and include that as the source error
    +	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
    +}
    +
    +func validateEndpointHandler(r *request.Request) {
    +	if r.ClientInfo.Endpoint == "" {
    +		r.Error = aws.ErrMissingEndpoint
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/errors.go
    new file mode 100644
    index 0000000..5766361
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/errors.go
    @@ -0,0 +1,17 @@
    +package aws
    +
    +import "github.com/aws/aws-sdk-go/aws/awserr"
    +
    +var (
    +	// ErrMissingRegion is an error that is returned if region configuration is
    +	// not found.
    +	//
    +	// @readonly
    +	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
    +
    +	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
    +	// resolved for a service.
    +	//
    +	// @readonly
    +	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
    +)
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/logger.go
    new file mode 100644
    index 0000000..db87188
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/logger.go
    @@ -0,0 +1,112 @@
    +package aws
    +
    +import (
    +	"log"
    +	"os"
    +)
    +
    +// A LogLevelType defines the level logging should be performed at. Used to instruct
    +// the SDK which statements should be logged.
    +type LogLevelType uint
    +
    +// LogLevel returns the pointer to a LogLevel. Should be used to workaround
    +// not being able to take the address of a non-composite literal.
    +func LogLevel(l LogLevelType) *LogLevelType {
    +	return &l
    +}
    +
    +// Value returns the LogLevel value or the default value LogOff if the LogLevel
    +// is nil. Safe to use on nil value LogLevelTypes.
    +func (l *LogLevelType) Value() LogLevelType {
    +	if l != nil {
    +		return *l
    +	}
    +	return LogOff
    +}
    +
    +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
    +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
    +func (l *LogLevelType) Matches(v LogLevelType) bool {
    +	c := l.Value()
    +	return c&v == v
    +}
    +
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
    +// to LogOff comparison.
    +func (l *LogLevelType) AtLeast(v LogLevelType) bool {
    +	c := l.Value()
    +	return c >= v
    +}
    +
    +const (
    +	// LogOff states that no logging should be performed by the SDK. This is the
    +	// default state of the SDK, and should be use to disable all logging.
    +	LogOff LogLevelType = iota * 0x1000
    +
    +	// LogDebug state that debug output should be logged by the SDK. This should
    +	// be used to inspect request made and responses received.
    +	LogDebug
    +)
    +
    +// Debug Logging Sub Levels
    +const (
    +	// LogDebugWithSigning states that the SDK should log request signing and
    +	// presigning events. This should be used to log the signing details of
    +	// requests for debugging. Will also enable LogDebug.
    +	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
    +
    +	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
    +	// HTTP bodys in addition to the headers and path. This should be used to
    +	// see the body content of requests and responses made while using the SDK
    +	// Will also enable LogDebug.
    +	LogDebugWithHTTPBody
    +
    +	// LogDebugWithRequestRetries states the SDK should log when service requests will
    +	// be retried. This should be used to log when you want to log when service
    +	// requests are being retried. Will also enable LogDebug.
    +	LogDebugWithRequestRetries
    +
    +	// LogDebugWithRequestErrors states the SDK should log when service requests fail
    +	// to build, send, validate, or unmarshal.
    +	LogDebugWithRequestErrors
    +)
    +
    +// A Logger is a minimalistic interface for the SDK to log messages to. Should
    +// be used to provide custom logging writers for the SDK to use.
    +type Logger interface {
    +	Log(...interface{})
    +}
    +
    +// A LoggerFunc is a convenience type to convert a function taking a variadic
    +// list of arguments and wrap it so the Logger interface can be used.
    +//
    +// Example:
    +//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
    +//         fmt.Fprintln(os.Stdout, args...)
    +//     })})
    +type LoggerFunc func(...interface{})
    +
    +// Log calls the wrapped function with the arguments provided
    +func (f LoggerFunc) Log(args ...interface{}) {
    +	f(args...)
    +}
    +
    +// NewDefaultLogger returns a Logger which will write log messages to stdout, and
    +// use same formatting runes as the stdlib log.Logger
    +func NewDefaultLogger() Logger {
    +	return &defaultLogger{
    +		logger: log.New(os.Stdout, "", log.LstdFlags),
    +	}
    +}
    +
    +// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
    +type defaultLogger struct {
    +	logger *log.Logger
    +}
    +
    +// Log logs the parameters to the stdlib logger. See log.Println.
    +func (l defaultLogger) Log(args ...interface{}) {
    +	l.logger.Println(args...)
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
    new file mode 100644
    index 0000000..5279c19
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
    @@ -0,0 +1,187 @@
    +package request
    +
    +import (
    +	"fmt"
    +	"strings"
    +)
    +
    +// A Handlers provides a collection of request handlers for various
    +// stages of handling requests.
    +type Handlers struct {
    +	Validate         HandlerList
    +	Build            HandlerList
    +	Sign             HandlerList
    +	Send             HandlerList
    +	ValidateResponse HandlerList
    +	Unmarshal        HandlerList
    +	UnmarshalMeta    HandlerList
    +	UnmarshalError   HandlerList
    +	Retry            HandlerList
    +	AfterRetry       HandlerList
    +}
    +
+// Copy returns a copy of this handler's lists.
    +func (h *Handlers) Copy() Handlers {
    +	return Handlers{
    +		Validate:         h.Validate.copy(),
    +		Build:            h.Build.copy(),
    +		Sign:             h.Sign.copy(),
    +		Send:             h.Send.copy(),
    +		ValidateResponse: h.ValidateResponse.copy(),
    +		Unmarshal:        h.Unmarshal.copy(),
    +		UnmarshalError:   h.UnmarshalError.copy(),
    +		UnmarshalMeta:    h.UnmarshalMeta.copy(),
    +		Retry:            h.Retry.copy(),
    +		AfterRetry:       h.AfterRetry.copy(),
    +	}
    +}
    +
    +// Clear removes callback functions for all handlers
    +func (h *Handlers) Clear() {
    +	h.Validate.Clear()
    +	h.Build.Clear()
    +	h.Send.Clear()
    +	h.Sign.Clear()
    +	h.Unmarshal.Clear()
    +	h.UnmarshalMeta.Clear()
    +	h.UnmarshalError.Clear()
    +	h.ValidateResponse.Clear()
    +	h.Retry.Clear()
    +	h.AfterRetry.Clear()
    +}
    +
    +// A HandlerListRunItem represents an entry in the HandlerList which
    +// is being run.
    +type HandlerListRunItem struct {
    +	Index   int
    +	Handler NamedHandler
    +	Request *Request
    +}
    +
    +// A HandlerList manages zero or more handlers in a list.
    +type HandlerList struct {
    +	list []NamedHandler
    +
    +	// Called after each request handler in the list is called. If set
    +	// and the func returns true the HandlerList will continue to iterate
    +	// over the request handlers. If false is returned the HandlerList
    +	// will stop iterating.
    +	//
    +	// Should be used if extra logic to be performed between each handler
    +	// in the list. This can be used to terminate a list's iteration
    +	// based on a condition such as error like, HandlerListStopOnError.
    +	// Or for logging like HandlerListLogItem.
    +	AfterEachFn func(item HandlerListRunItem) bool
    +}
    +
    +// A NamedHandler is a struct that contains a name and function callback.
    +type NamedHandler struct {
    +	Name string
    +	Fn   func(*Request)
    +}
    +
    +// copy creates a copy of the handler list.
    +func (l *HandlerList) copy() HandlerList {
    +	n := HandlerList{
    +		AfterEachFn: l.AfterEachFn,
    +	}
    +	n.list = append([]NamedHandler{}, l.list...)
    +	return n
    +}
    +
    +// Clear clears the handler list.
    +func (l *HandlerList) Clear() {
    +	l.list = []NamedHandler{}
    +}
    +
    +// Len returns the number of handlers in the list.
    +func (l *HandlerList) Len() int {
    +	return len(l.list)
    +}
    +
    +// PushBack pushes handler f to the back of the handler list.
    +func (l *HandlerList) PushBack(f func(*Request)) {
    +	l.list = append(l.list, NamedHandler{"__anonymous", f})
    +}
    +
    +// PushFront pushes handler f to the front of the handler list.
    +func (l *HandlerList) PushFront(f func(*Request)) {
    +	l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
    +}
    +
+// PushBackNamed pushes named handler n to the back of the handler list.
    +func (l *HandlerList) PushBackNamed(n NamedHandler) {
    +	l.list = append(l.list, n)
    +}
    +
+// PushFrontNamed pushes named handler n to the front of the handler list.
    +func (l *HandlerList) PushFrontNamed(n NamedHandler) {
    +	l.list = append([]NamedHandler{n}, l.list...)
    +}
    +
    +// Remove removes a NamedHandler n
    +func (l *HandlerList) Remove(n NamedHandler) {
    +	newlist := []NamedHandler{}
    +	for _, m := range l.list {
    +		if m.Name != n.Name {
    +			newlist = append(newlist, m)
    +		}
    +	}
    +	l.list = newlist
    +}
    +
    +// Run executes all handlers in the list with a given request object.
    +func (l *HandlerList) Run(r *Request) {
    +	for i, h := range l.list {
    +		h.Fn(r)
    +		item := HandlerListRunItem{
    +			Index: i, Handler: h, Request: r,
    +		}
    +		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
    +			return
    +		}
    +	}
    +}
    +
    +// HandlerListLogItem logs the request handler and the state of the
    +// request's Error value. Always returns true to continue iterating
    +// request handlers in a HandlerList.
    +func HandlerListLogItem(item HandlerListRunItem) bool {
    +	if item.Request.Config.Logger == nil {
    +		return true
    +	}
    +	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
    +		item.Index, item.Handler.Name, item.Request.Error)
    +
    +	return true
    +}
    +
    +// HandlerListStopOnError returns false to stop the HandlerList iterating
    +// over request handlers if Request.Error is not nil. True otherwise
    +// to continue iterating.
    +func HandlerListStopOnError(item HandlerListRunItem) bool {
    +	return item.Request.Error == nil
    +}
    +
    +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
    +// header. If the extra parameters are provided they will be added as metadata to the
    +// name/version pair resulting in the following format.
    +// "name/version (extra0; extra1; ...)"
    +// The user agent part will be concatenated with this current request's user agent string.
    +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
    +	ua := fmt.Sprintf("%s/%s", name, version)
    +	if len(extra) > 0 {
    +		ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
    +	}
    +	return func(r *Request) {
    +		AddToUserAgent(r, ua)
    +	}
    +}
    +
    +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
    +// The input string will be concatenated with the current request's user agent string.
    +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
    +	return func(r *Request) {
    +		AddToUserAgent(r, s)
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
    new file mode 100644
    index 0000000..79f7960
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
    @@ -0,0 +1,24 @@
    +package request
    +
    +import (
    +	"io"
    +	"net/http"
    +	"net/url"
    +)
    +
    +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
    +	req := new(http.Request)
    +	*req = *r
    +	req.URL = &url.URL{}
    +	*req.URL = *r.URL
    +	req.Body = body
    +
    +	req.Header = http.Header{}
    +	for k, v := range r.Header {
    +		for _, vv := range v {
    +			req.Header.Add(k, vv)
    +		}
    +	}
    +
    +	return req
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
    new file mode 100644
    index 0000000..02f07f4
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
    @@ -0,0 +1,58 @@
    +package request
    +
    +import (
    +	"io"
    +	"sync"
    +)
    +
    +// offsetReader is a thread-safe io.ReadCloser to prevent racing
    +// with retrying requests
    +type offsetReader struct {
    +	buf    io.ReadSeeker
    +	lock   sync.Mutex
    +	closed bool
    +}
    +
    +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
    +	reader := &offsetReader{}
    +	buf.Seek(offset, 0)
    +
    +	reader.buf = buf
    +	return reader
    +}
    +
    +// Close will close the instance of the offset reader's access to
    +// the underlying io.ReadSeeker.
    +func (o *offsetReader) Close() error {
    +	o.lock.Lock()
    +	defer o.lock.Unlock()
    +	o.closed = true
    +	return nil
    +}
    +
    +// Read is a thread-safe read of the underlying io.ReadSeeker
    +func (o *offsetReader) Read(p []byte) (int, error) {
    +	o.lock.Lock()
    +	defer o.lock.Unlock()
    +
    +	if o.closed {
    +		return 0, io.EOF
    +	}
    +
    +	return o.buf.Read(p)
    +}
    +
    +// Seek is a thread-safe seeking operation.
    +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
    +	o.lock.Lock()
    +	defer o.lock.Unlock()
    +
    +	return o.buf.Seek(offset, whence)
    +}
    +
    +// CloseAndCopy will return a new offsetReader with a copy of the old buffer
    +// and close the old buffer.
    +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
    +	o.Close()
    +	return newOffsetReader(o.buf, offset)
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
    new file mode 100644
    index 0000000..8ef9715
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
    @@ -0,0 +1,344 @@
    +package request
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"net/http"
    +	"net/url"
    +	"reflect"
    +	"strings"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/client/metadata"
    +)
    +
    +// A Request is the service request to be made.
    +type Request struct {
    +	Config     aws.Config
    +	ClientInfo metadata.ClientInfo
    +	Handlers   Handlers
    +
    +	Retryer
    +	Time             time.Time
    +	ExpireTime       time.Duration
    +	Operation        *Operation
    +	HTTPRequest      *http.Request
    +	HTTPResponse     *http.Response
    +	Body             io.ReadSeeker
    +	BodyStart        int64 // offset from beginning of Body that the request body starts
    +	Params           interface{}
    +	Error            error
    +	Data             interface{}
    +	RequestID        string
    +	RetryCount       int
    +	Retryable        *bool
    +	RetryDelay       time.Duration
    +	NotHoist         bool
    +	SignedHeaderVals http.Header
    +	LastSignedAt     time.Time
    +
    +	built bool
    +
+	// Need to persist an intermediate body between the input Body and HTTP
    +	// request body because the HTTP Client's transport can maintain a reference
    +	// to the HTTP request's body after the client has returned. This value is
    +	// safe to use concurrently and rewraps the input Body for each HTTP request.
    +	safeBody *offsetReader
    +}
    +
    +// An Operation is the service API operation to be made.
    +type Operation struct {
    +	Name       string
    +	HTTPMethod string
    +	HTTPPath   string
    +	*Paginator
    +}
    +
    +// Paginator keeps track of pagination configuration for an API operation.
    +type Paginator struct {
    +	InputTokens     []string
    +	OutputTokens    []string
    +	LimitToken      string
    +	TruncationToken string
    +}
    +
    +// New returns a new Request pointer for the service API
    +// operation and parameters.
    +//
    +// Params is any value of input parameters to be the request payload.
    +// Data is pointer value to an object which the request's response
    +// payload will be deserialized to.
    +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
    +	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
    +
    +	method := operation.HTTPMethod
    +	if method == "" {
    +		method = "POST"
    +	}
    +
    +	httpReq, _ := http.NewRequest(method, "", nil)
    +
    +	var err error
    +	httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
    +	if err != nil {
    +		httpReq.URL = &url.URL{}
    +		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
    +	}
    +
    +	r := &Request{
    +		Config:     cfg,
    +		ClientInfo: clientInfo,
    +		Handlers:   handlers.Copy(),
    +
    +		Retryer:     retryer,
    +		Time:        time.Now(),
    +		ExpireTime:  0,
    +		Operation:   operation,
    +		HTTPRequest: httpReq,
    +		Body:        nil,
    +		Params:      params,
    +		Error:       err,
    +		Data:        data,
    +	}
    +	r.SetBufferBody([]byte{})
    +
    +	return r
    +}
    +
+// WillRetry returns if the request can be retried.
    +func (r *Request) WillRetry() bool {
    +	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
    +}
    +
    +// ParamsFilled returns if the request's parameters have been populated
    +// and the parameters are valid. False is returned if no parameters are
    +// provided or invalid.
    +func (r *Request) ParamsFilled() bool {
    +	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
    +}
    +
    +// DataFilled returns true if the request's data for response deserialization
    +// target has been set and is a valid. False is returned if data is not
    +// set, or is invalid.
    +func (r *Request) DataFilled() bool {
    +	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
    +}
    +
    +// SetBufferBody will set the request's body bytes that will be sent to
    +// the service API.
    +func (r *Request) SetBufferBody(buf []byte) {
    +	r.SetReaderBody(bytes.NewReader(buf))
    +}
    +
    +// SetStringBody sets the body of the request to be backed by a string.
    +func (r *Request) SetStringBody(s string) {
    +	r.SetReaderBody(strings.NewReader(s))
    +}
    +
    +// SetReaderBody will set the request's body reader.
    +func (r *Request) SetReaderBody(reader io.ReadSeeker) {
    +	r.Body = reader
    +	r.ResetBody()
    +}
    +
    +// Presign returns the request's signed URL. Error will be returned
    +// if the signing fails.
    +func (r *Request) Presign(expireTime time.Duration) (string, error) {
    +	r.ExpireTime = expireTime
    +	r.NotHoist = false
    +	r.Sign()
    +	if r.Error != nil {
    +		return "", r.Error
    +	}
    +	return r.HTTPRequest.URL.String(), nil
    +}
    +
    +// PresignRequest behaves just like Presign, but hoists all headers and signs them.
    +// Also returns the signed hash back to the user
    +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
    +	r.ExpireTime = expireTime
    +	r.NotHoist = true
    +	r.Sign()
    +	if r.Error != nil {
    +		return "", nil, r.Error
    +	}
    +	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
    +}
    +
    +func debugLogReqError(r *Request, stage string, retrying bool, err error) {
    +	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
    +		return
    +	}
    +
    +	retryStr := "not retrying"
    +	if retrying {
    +		retryStr = "will retry"
    +	}
    +
    +	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
    +		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
    +}
    +
    +// Build will build the request's object so it can be signed and sent
    +// to the service. Build will also validate all the request's parameters.
    +// Any additional build Handlers set on this request will be run
    +// in the order they were set.
    +//
    +// The request will only be built once. Multiple calls to build will have
    +// no effect.
    +//
    +// If any Validate or Build errors occur the build will stop and the error
    +// which occurred will be returned.
    +func (r *Request) Build() error {
    +	if !r.built {
    +		r.Handlers.Validate.Run(r)
    +		if r.Error != nil {
    +			debugLogReqError(r, "Validate Request", false, r.Error)
    +			return r.Error
    +		}
    +		r.Handlers.Build.Run(r)
    +		if r.Error != nil {
    +			debugLogReqError(r, "Build Request", false, r.Error)
    +			return r.Error
    +		}
    +		r.built = true
    +	}
    +
    +	return r.Error
    +}
    +
    +// Sign will sign the request returning error if errors are encountered.
    +//
    +// Send will build the request prior to signing. All Sign Handlers will
    +// be executed in the order they were set.
    +func (r *Request) Sign() error {
    +	r.Build()
    +	if r.Error != nil {
    +		debugLogReqError(r, "Build Request", false, r.Error)
    +		return r.Error
    +	}
    +
    +	r.Handlers.Sign.Run(r)
    +	return r.Error
    +}
    +
    +// ResetBody rewinds the request body back to its starting position, and
    +// sets the HTTP Request body reference. When the body is read prior
    +// to being sent in the HTTP request it will need to be rewound.
    +func (r *Request) ResetBody() {
    +	if r.safeBody != nil {
    +		r.safeBody.Close()
    +	}
    +
    +	r.safeBody = newOffsetReader(r.Body, r.BodyStart)
    +	r.HTTPRequest.Body = r.safeBody
    +}
    +
    +// GetBody will return an io.ReadSeeker of the Request's underlying
    +// input body with a concurrency safe wrapper.
    +func (r *Request) GetBody() io.ReadSeeker {
    +	return r.safeBody
    +}
    +
    +// Send will send the request returning error if errors are encountered.
    +//
    +// Send will sign the request prior to sending. All Send Handlers will
    +// be executed in the order they were set.
    +//
    +// Canceling a request is non-deterministic. If a request has been canceled,
    +// then the transport will choose, randomly, one of the state channels during
    +// reads or getting the connection.
    +//
    +// readLoop() and getConn(req *Request, cm connectMethod)
    +// https://github.com/golang/go/blob/master/src/net/http/transport.go
    +//
    +// Send will not close the request.Request's body.
    +func (r *Request) Send() error {
    +	for {
    +		if aws.BoolValue(r.Retryable) {
    +			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
    +				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
    +					r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
    +			}
    +
    +			// The previous http.Request will have a reference to the r.Body
    +			// and the HTTP Client's Transport may still be reading from
    +			// the request's body even though the Client's Do returned.
    +			r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
    +			r.ResetBody()
    +
    +			// Closing response body to ensure that no response body is leaked
    +			// between retry attempts.
    +			if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
    +				r.HTTPResponse.Body.Close()
    +			}
    +		}
    +
    +		r.Sign()
    +		if r.Error != nil {
    +			return r.Error
    +		}
    +
    +		r.Retryable = nil
    +
    +		r.Handlers.Send.Run(r)
    +		if r.Error != nil {
    +			if strings.Contains(r.Error.Error(), "net/http: request canceled") {
    +				return r.Error
    +			}
    +
    +			err := r.Error
    +			r.Handlers.Retry.Run(r)
    +			r.Handlers.AfterRetry.Run(r)
    +			if r.Error != nil {
    +				debugLogReqError(r, "Send Request", false, r.Error)
    +				return r.Error
    +			}
    +			debugLogReqError(r, "Send Request", true, err)
    +			continue
    +		}
    +		r.Handlers.UnmarshalMeta.Run(r)
    +		r.Handlers.ValidateResponse.Run(r)
    +		if r.Error != nil {
    +			err := r.Error
    +			r.Handlers.UnmarshalError.Run(r)
    +			r.Handlers.Retry.Run(r)
    +			r.Handlers.AfterRetry.Run(r)
    +			if r.Error != nil {
    +				debugLogReqError(r, "Validate Response", false, r.Error)
    +				return r.Error
    +			}
    +			debugLogReqError(r, "Validate Response", true, err)
    +			continue
    +		}
    +
    +		r.Handlers.Unmarshal.Run(r)
    +		if r.Error != nil {
    +			err := r.Error
    +			r.Handlers.Retry.Run(r)
    +			r.Handlers.AfterRetry.Run(r)
    +			if r.Error != nil {
    +				debugLogReqError(r, "Unmarshal Response", false, r.Error)
    +				return r.Error
    +			}
    +			debugLogReqError(r, "Unmarshal Response", true, err)
    +			continue
    +		}
    +
    +		break
    +	}
    +
    +	return nil
    +}
    +
    +// AddToUserAgent adds the string to the end of the request's current user agent.
    +func AddToUserAgent(r *Request, s string) {
    +	curUA := r.HTTPRequest.Header.Get("User-Agent")
    +	if len(curUA) > 0 {
    +		s = curUA + " " + s
    +	}
    +	r.HTTPRequest.Header.Set("User-Agent", s)
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
    new file mode 100644
    index 0000000..2939ec4
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
    @@ -0,0 +1,104 @@
    +package request
    +
    +import (
    +	"reflect"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awsutil"
    +)
    +
    +//type Paginater interface {
    +//	HasNextPage() bool
    +//	NextPage() *Request
    +//	EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
    +//}
    +
    +// HasNextPage returns true if this request has more pages of data available.
    +func (r *Request) HasNextPage() bool {
    +	return len(r.nextPageTokens()) > 0
    +}
    +
    +// nextPageTokens returns the tokens to use when asking for the next page of
    +// data.
    +func (r *Request) nextPageTokens() []interface{} {
    +	if r.Operation.Paginator == nil {
    +		return nil
    +	}
    +
    +	if r.Operation.TruncationToken != "" {
    +		tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
    +		if len(tr) == 0 {
    +			return nil
    +		}
    +
    +		switch v := tr[0].(type) {
    +		case *bool:
    +			if !aws.BoolValue(v) {
    +				return nil
    +			}
    +		case bool:
    +			if v == false {
    +				return nil
    +			}
    +		}
    +	}
    +
    +	tokens := []interface{}{}
    +	tokenAdded := false
    +	for _, outToken := range r.Operation.OutputTokens {
    +		v, _ := awsutil.ValuesAtPath(r.Data, outToken)
    +		if len(v) > 0 {
    +			tokens = append(tokens, v[0])
    +			tokenAdded = true
    +		} else {
    +			tokens = append(tokens, nil)
    +		}
    +	}
    +	if !tokenAdded {
    +		return nil
    +	}
    +
    +	return tokens
    +}
    +
    +// NextPage returns a new Request that can be executed to return the next
    +// page of result data. Call .Send() on this request to execute it.
    +func (r *Request) NextPage() *Request {
    +	tokens := r.nextPageTokens()
    +	if len(tokens) == 0 {
    +		return nil
    +	}
    +
    +	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
    +	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
    +	for i, intok := range nr.Operation.InputTokens {
    +		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
    +	}
    +	return nr
    +}
    +
    +// EachPage iterates over each page of a paginated request object. The fn
    +// parameter should be a function with the following sample signature:
    +//
    +//   func(page *T, lastPage bool) bool {
    +//       return true // return false to stop iterating
    +//   }
    +//
    +// Where "T" is the structure type matching the output structure of the given
    +// operation. For example, a request object generated by
    +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
    +// as the structure "T". The lastPage value represents whether the page is
    +// the last page of data or not. The return value of this function should
    +// return true to keep iterating or false to stop.
    +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
    +	for page := r; page != nil; page = page.NextPage() {
    +		if err := page.Send(); err != nil {
    +			return err
    +		}
    +		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
    +			return page.Error
    +		}
    +	}
    +
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
    new file mode 100644
    index 0000000..8cc8b01
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
    @@ -0,0 +1,101 @@
    +package request
    +
    +import (
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +)
    +
    +// Retryer is an interface to control retry logic for a given service.
    +// The default implementation used by most services is the service.DefaultRetryer
    +// structure, which contains basic retry logic using exponential backoff.
    +type Retryer interface {
    +	RetryRules(*Request) time.Duration
    +	ShouldRetry(*Request) bool
    +	MaxRetries() int
    +}
    +
    +// WithRetryer sets a config Retryer value to the given Config returning it
    +// for chaining.
    +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
    +	cfg.Retryer = retryer
    +	return cfg
    +}
    +
    +// retryableCodes is a collection of service response codes which are retry-able
    +// without any further action.
    +var retryableCodes = map[string]struct{}{
    +	"RequestError":   {},
    +	"RequestTimeout": {},
    +}
    +
    +var throttleCodes = map[string]struct{}{
    +	"ProvisionedThroughputExceededException": {},
    +	"Throttling":                             {},
    +	"ThrottlingException":                    {},
    +	"RequestLimitExceeded":                   {},
    +	"RequestThrottled":                       {},
    +	"LimitExceededException":                 {}, // Deleting 10+ DynamoDb tables at once
    +	"TooManyRequestsException":               {}, // Lambda functions
    +}
    +
    +// credsExpiredCodes is a collection of error codes which signify the credentials
    +// need to be refreshed. Expired tokens require refreshing of credentials, and
    +// resigning before the request can be retried.
    +var credsExpiredCodes = map[string]struct{}{
    +	"ExpiredToken":          {},
    +	"ExpiredTokenException": {},
    +	"RequestExpired":        {}, // EC2 Only
    +}
    +
    +func isCodeThrottle(code string) bool {
    +	_, ok := throttleCodes[code]
    +	return ok
    +}
    +
    +func isCodeRetryable(code string) bool {
    +	if _, ok := retryableCodes[code]; ok {
    +		return true
    +	}
    +
    +	return isCodeExpiredCreds(code)
    +}
    +
    +func isCodeExpiredCreds(code string) bool {
    +	_, ok := credsExpiredCodes[code]
    +	return ok
    +}
    +
    +// IsErrorRetryable returns whether the error is retryable, based on its Code.
    +// Returns false if the request has no Error set.
    +func (r *Request) IsErrorRetryable() bool {
    +	if r.Error != nil {
    +		if err, ok := r.Error.(awserr.Error); ok {
    +			return isCodeRetryable(err.Code())
    +		}
    +	}
    +	return false
    +}
    +
    +// IsErrorThrottle returns whether the error is to be throttled based on its code.
    +// Returns false if the request has no Error set
    +func (r *Request) IsErrorThrottle() bool {
    +	if r.Error != nil {
    +		if err, ok := r.Error.(awserr.Error); ok {
    +			return isCodeThrottle(err.Code())
    +		}
    +	}
    +	return false
    +}
    +
    +// IsErrorExpired returns whether the error code is a credential expiry error.
    +// Returns false if the request has no Error set.
    +func (r *Request) IsErrorExpired() bool {
    +	if r.Error != nil {
    +		if err, ok := r.Error.(awserr.Error); ok {
    +			return isCodeExpiredCreds(err.Code())
    +		}
    +	}
    +	return false
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
    new file mode 100644
    index 0000000..2520286
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
    @@ -0,0 +1,234 @@
    +package request
    +
    +import (
    +	"bytes"
    +	"fmt"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +)
    +
    +const (
    +	// InvalidParameterErrCode is the error code for invalid parameters errors
    +	InvalidParameterErrCode = "InvalidParameter"
    +	// ParamRequiredErrCode is the error code for required parameter errors
    +	ParamRequiredErrCode = "ParamRequiredError"
    +	// ParamMinValueErrCode is the error code for fields with too low of a
    +	// number value.
    +	ParamMinValueErrCode = "ParamMinValueError"
    +	// ParamMinLenErrCode is the error code for fields without enough elements.
    +	ParamMinLenErrCode = "ParamMinLenError"
    +)
    +
    +// Validator provides a way for types to perform validation logic on their
    +// input values that external code can use to determine if a type's values
    +// are valid.
    +type Validator interface {
    +	Validate() error
    +}
    +
    +// An ErrInvalidParams provides wrapping of invalid parameter errors found when
    +// validating API operation input parameters.
    +type ErrInvalidParams struct {
    +	// Context is the base context of the invalid parameter group.
    +	Context string
    +	errs    []ErrInvalidParam
    +}
    +
    +// Add adds a new invalid parameter error to the collection of invalid
    +// parameters. The context of the invalid parameter will be updated to reflect
    +// this collection.
    +func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
    +	err.SetContext(e.Context)
    +	e.errs = append(e.errs, err)
    +}
    +
    +// AddNested adds the invalid parameter errors from another ErrInvalidParams
    +// value into this collection. The nested errors will have their nested context
    +// updated and base context to reflect the merging.
    +//
    +// Use for nested validation errors.
    +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
    +	for _, err := range nested.errs {
    +		err.SetContext(e.Context)
    +		err.AddNestedContext(nestedCtx)
    +		e.errs = append(e.errs, err)
    +	}
    +}
    +
    +// Len returns the number of invalid parameter errors
    +func (e ErrInvalidParams) Len() int {
    +	return len(e.errs)
    +}
    +
    +// Code returns the code of the error
    +func (e ErrInvalidParams) Code() string {
    +	return InvalidParameterErrCode
    +}
    +
    +// Message returns the message of the error
    +func (e ErrInvalidParams) Message() string {
    +	return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
    +}
    +
    +// Error returns the string formatted form of the invalid parameters.
    +func (e ErrInvalidParams) Error() string {
    +	w := &bytes.Buffer{}
    +	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
    +
    +	for _, err := range e.errs {
    +		fmt.Fprintf(w, "- %s\n", err.Message())
    +	}
    +
    +	return w.String()
    +}
    +
    +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
    +func (e ErrInvalidParams) OrigErr() error {
    +	return awserr.NewBatchError(
    +		InvalidParameterErrCode, e.Message(), e.OrigErrs())
    +}
    +
    +// OrigErrs returns a slice of the invalid parameters
    +func (e ErrInvalidParams) OrigErrs() []error {
    +	errs := make([]error, len(e.errs))
    +	for i := 0; i < len(errs); i++ {
    +		errs[i] = e.errs[i]
    +	}
    +
    +	return errs
    +}
    +
    +// An ErrInvalidParam represents an invalid parameter error type.
    +type ErrInvalidParam interface {
    +	awserr.Error
    +
    +	// Field name the error occurred on.
    +	Field() string
    +
    +	// SetContext updates the context of the error.
    +	SetContext(string)
    +
    +	// AddNestedContext updates the error's context to include a nested level.
    +	AddNestedContext(string)
    +}
    +
    +type errInvalidParam struct {
    +	context       string
    +	nestedContext string
    +	field         string
    +	code          string
    +	msg           string
    +}
    +
    +// Code returns the error code for the type of invalid parameter.
    +func (e *errInvalidParam) Code() string {
    +	return e.code
    +}
    +
    +// Message returns the reason the parameter was invalid, and its context.
    +func (e *errInvalidParam) Message() string {
    +	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
    +}
    +
    +// Error returns the string version of the invalid parameter error.
    +func (e *errInvalidParam) Error() string {
    +	return fmt.Sprintf("%s: %s", e.code, e.Message())
    +}
    +
    +// OrigErr returns nil, Implemented for awserr.Error interface.
    +func (e *errInvalidParam) OrigErr() error {
    +	return nil
    +}
    +
    +// Field returns the field and context in which the error occurred.
    +func (e *errInvalidParam) Field() string {
    +	field := e.context
    +	if len(field) > 0 {
    +		field += "."
    +	}
    +	if len(e.nestedContext) > 0 {
    +		field += fmt.Sprintf("%s.", e.nestedContext)
    +	}
    +	field += e.field
    +
    +	return field
    +}
    +
    +// SetContext updates the base context of the error.
    +func (e *errInvalidParam) SetContext(ctx string) {
    +	e.context = ctx
    +}
    +
    +// AddNestedContext prepends a context to the field's path.
    +func (e *errInvalidParam) AddNestedContext(ctx string) {
    +	if len(e.nestedContext) == 0 {
    +		e.nestedContext = ctx
    +	} else {
    +		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
    +	}
    +
    +}
    +
    +// An ErrParamRequired represents a required parameter error.
    +type ErrParamRequired struct {
    +	errInvalidParam
    +}
    +
    +// NewErrParamRequired creates a new required parameter error.
    +func NewErrParamRequired(field string) *ErrParamRequired {
    +	return &ErrParamRequired{
    +		errInvalidParam{
    +			code:  ParamRequiredErrCode,
    +			field: field,
    +			msg:   fmt.Sprintf("missing required field"),
    +		},
    +	}
    +}
    +
    +// An ErrParamMinValue represents a minimum value parameter error.
    +type ErrParamMinValue struct {
    +	errInvalidParam
    +	min float64
    +}
    +
    +// NewErrParamMinValue creates a new minimum value parameter error.
    +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
    +	return &ErrParamMinValue{
    +		errInvalidParam: errInvalidParam{
    +			code:  ParamMinValueErrCode,
    +			field: field,
    +			msg:   fmt.Sprintf("minimum field value of %v", min),
    +		},
    +		min: min,
    +	}
    +}
    +
    +// MinValue returns the field's required minimum value.
    +//
    +// float64 is returned for both int and float min values.
    +func (e *ErrParamMinValue) MinValue() float64 {
    +	return e.min
    +}
    +
    +// An ErrParamMinLen represents a minimum length parameter error.
    +type ErrParamMinLen struct {
    +	errInvalidParam
    +	min int
    +}
    +
    +// NewErrParamMinLen creates a new minimum length parameter error.
    +func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
    +	return &ErrParamMinLen{
    +		errInvalidParam: errInvalidParam{
    +			code:  ParamMinValueErrCode,
    +			field: field,
    +			msg:   fmt.Sprintf("minimum field size of %v", min),
    +		},
    +		min: min,
    +	}
    +}
    +
    +// MinLen returns the field's required minimum length.
    +func (e *ErrParamMinLen) MinLen() int {
    +	return e.min
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
    new file mode 100644
    index 0000000..d3dc840
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
    @@ -0,0 +1,223 @@
    +/*
    +Package session provides configuration for the SDK's service clients.
    +
    +Sessions can be shared across all service clients that share the same base
    +configuration.  The Session is built from the SDK's default configuration and
    +request handlers.
    +
    +Sessions should be cached when possible, because creating a new Session will
    +load all configuration values from the environment, and config files each time
    +the Session is created. Sharing the Session value across all of your service
    +clients will ensure the configuration is loaded the fewest number of times possible.
    +
    +Concurrency
    +
    +Sessions are safe to use concurrently as long as the Session is not being
    +modified. The SDK will not modify the Session once the Session has been created.
    +Creating service clients concurrently from a shared Session is safe.
    +
    +Sessions from Shared Config
    +
    +Sessions can be created using the method above that will only load the
    +additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
    +Alternatively you can explicitly create a Session with shared config enabled.
    +To do this you can use NewSessionWithOptions to configure how the Session will
    +be created. Using the NewSessionWithOptions with SharedConfigState set to
    +SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG
    +environment variable was set.
    +
    +Creating Sessions
    +
    +When creating Sessions optional aws.Config values can be passed in that will
    +override the default, or loaded config values the Session is being created
    +with. This allows you to provide additional, or case based, configuration
    +as needed.
    +
    +By default NewSession will only load credentials from the shared credentials
    +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
    +set to a truthy value the Session will be created from the configuration
    +values from the shared config (~/.aws/config) and shared credentials
    +(~/.aws/credentials) files. See the section Sessions from Shared Config for
    +more information.
    +
    +Create a Session with the default config and request handlers. With credentials
    +region, and profile loaded from the environment and shared config automatically.
    +Requires the AWS_PROFILE to be set, or "default" is used.
    +
    +	// Create Session
    +	sess, err := session.NewSession()
    +
    +	// Create a Session with a custom region
    +	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
    +
    +	// Create a S3 client instance from a session
    +	sess, err := session.NewSession()
    +	if err != nil {
    +		// Handle Session creation error
    +	}
    +	svc := s3.New(sess)
    +
    +Create Session With Option Overrides
    +
    +In addition to NewSession, Sessions can be created using NewSessionWithOptions.
    +This func allows you to control and override how the Session will be created
    +through code instead of being driven by environment variables only.
    +
    +Use NewSessionWithOptions when you want to provide the config profile, or
    +override the shared config state (AWS_SDK_LOAD_CONFIG).
    +
    +	// Equivalent to session.NewSession()
    +	sess, err := session.NewSessionWithOptions(session.Options{})
    +
    +	// Specify profile to load for the session's config
    +	sess, err := session.NewSessionWithOptions(session.Options{
    +		 Profile: "profile_name",
    +	})
    +
    +	// Specify profile for config and region for requests
    +	sess, err := session.NewSessionWithOptions(session.Options{
    +		 Config: aws.Config{Region: aws.String("us-east-1")},
    +		 Profile: "profile_name",
    +	})
    +
    +	// Force enable Shared Config support
    +	sess, err := session.NewSessionWithOptions(session.Options{
    +		SharedConfigState: SharedConfigEnable,
    +	})
    +
    +Adding Handlers
    +
    +You can add handlers to a session for processing HTTP requests. All service
    +clients that use the session inherit the handlers. For example, the following
    +handler logs every request and its payload made by a service client:
    +
    +	// Create a session, and add additional handlers for all service
    +	// clients created with the Session to inherit. Adds logging handler.
    +	sess, err := session.NewSession()
    +	sess.Handlers.Send.PushFront(func(r *request.Request) {
    +		// Log every request made and its payload
    +		logger.Println("Request: %s/%s, Payload: %s",
    +			r.ClientInfo.ServiceName, r.Operation, r.Params)
    +	})
    +
    +Deprecated "New" function
    +
    +The New session function has been deprecated because it does not provide good
    +way to return errors that occur when loading the configuration files and values.
    +Because of this, NewSession was created so errors can be retrieved when
    +creating a session fails.
    +
    +Shared Config Fields
    +
    +By default the SDK will only load the shared credentials file's (~/.aws/credentials)
    +credentials values, and all other config is provided by the environment variables,
    +SDK defaults, and user provided aws.Config values.
    +
    +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
    +option is used to create the Session the full shared config values will be
    +loaded. This includes credentials, region, and support for assume role. In
    +addition the Session will load its configuration from both the shared config
    +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
    +files have the same format.
    +
    +If both config files are present the configuration from both files will be
    +read. The Session will be created from the configuration values from the shared
    +credentials file (~/.aws/credentials) over those in the shared config
    +file (~/.aws/config).
    +
    +Credentials are the values the SDK should use for authenticating requests with
    +AWS Services. When loaded from a configuration file, both
    +aws_access_key_id and aws_secret_access_key must be provided together in the
    +same file to be considered valid. The values will be ignored if not a complete
    +group. aws_session_token is an optional field that can be provided if both of
    +the other two fields are also provided.
    +
    +	aws_access_key_id = AKID
    +	aws_secret_access_key = SECRET
    +	aws_session_token = TOKEN
    +
    +Assume Role values allow you to configure the SDK to assume an IAM role using
    +a set of credentials provided in a config file via the source_profile field.
    +Both "role_arn" and "source_profile" are required. The SDK does not support
    +assuming a role with MFA token Via the Session's constructor. You can use the
    +stscreds.AssumeRoleProvider credentials provider to specify custom
    +configuration and support for MFA.
    +
    +	role_arn = arn:aws:iam:::role/
    +	source_profile = profile_with_creds
    +	external_id = 1234
    +	mfa_serial = not supported!
    +	role_session_name = session_name
    +
    +Region is the region the SDK should use for looking up AWS service endpoints
    +and signing requests.
    +
    +	region = us-east-1
    +
    +Environment Variables
    +
    +When a Session is created several environment variables can be set to adjust
    +how the SDK functions, and what configuration data it loads when creating
    +Sessions. All environment values are optional, but some values like credentials
    +require multiple of the values to set or the partial values will be ignored.
    +All environment variable values are strings unless otherwise noted.
    +
    +Environment configuration values. If set both Access Key ID and Secret Access
    +Key must be provided. Session Token can optionally also be provided, but is
    +not required.
    +
    +	# Access Key ID
    +	AWS_ACCESS_KEY_ID=AKID
    +	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
    +
    +	# Secret Access Key
    +	AWS_SECRET_ACCESS_KEY=SECRET
    +	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
    +
    +	# Session Token
    +	AWS_SESSION_TOKEN=TOKEN
    +
    +Region value will instruct the SDK where to make service API requests to. If it is
    +not provided in the environment the region must be provided before a service
    +client request is made.
    +
    +	AWS_REGION=us-east-1
    +
    +	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
    +	# and AWS_REGION is not also set.
    +	AWS_DEFAULT_REGION=us-east-1
    +
    +Profile name the SDK should use when loading shared config from the
    +configuration files. If not provided "default" will be used as the profile name.
    +
    +	AWS_PROFILE=my_profile
    +
    +	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
    +	# and AWS_PROFILE is not also set.
    +	AWS_DEFAULT_PROFILE=my_profile
    +
    +SDK load config instructs the SDK to load the shared config in addition to
    +shared credentials. This also expands the configuration loaded so the shared
    +credentials will have parity with the shared config file. This also enables
    +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
    +env values as well.
    +
    +	AWS_SDK_LOAD_CONFIG=1
    +
    +Shared credentials file path can be set to instruct the SDK to use an alternative
    +file for the shared credentials. If not set the file will be loaded from
    +$HOME/.aws/credentials on Linux/Unix based systems, and
    +%USERPROFILE%\.aws\credentials on Windows.
    +
    +	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
    +
    +Shared config file path can be set to instruct the SDK to use an alternative
    +file for the shared config. If not set the file will be loaded from
    +$HOME/.aws/config on Linux/Unix based systems, and
    +%USERPROFILE%\.aws\config on Windows.
    +
    +	AWS_CONFIG_FILE=$HOME/my_shared_config
    +
    +
    +*/
    +package session
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
    new file mode 100644
    index 0000000..d2f0c84
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
    @@ -0,0 +1,188 @@
    +package session
    +
    +import (
    +	"os"
    +	"path/filepath"
    +	"strconv"
    +
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +)
    +
    +// envConfig is a collection of environment values the SDK will read
    +// setup config from. All environment values are optional. But some values
    +// such as credentials require multiple values to be complete or the values
    +// will be ignored.
    +type envConfig struct {
    +	// Environment configuration values. If set both Access Key ID and Secret Access
    +	// Key must be provided. Session Token can optionally also be provided, but is
    +	// not required.
    +	//
    +	//	# Access Key ID
    +	//	AWS_ACCESS_KEY_ID=AKID
    +	//	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
    +	//
    +	//	# Secret Access Key
    +	//	AWS_SECRET_ACCESS_KEY=SECRET
    +	//	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
    +	//
    +	//	# Session Token
    +	//	AWS_SESSION_TOKEN=TOKEN
    +	Creds credentials.Value
    +
    +	// Region value will instruct the SDK where to make service API requests to. If is
    +	// not provided in the environment the region must be provided before a service
    +	// client request is made.
    +	//
    +	//	AWS_REGION=us-east-1
    +	//
    +	//	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
    +	//	# and AWS_REGION is not also set.
    +	//	AWS_DEFAULT_REGION=us-east-1
    +	Region string
    +
    +	// Profile name the SDK should use when loading shared configuration from the
    +	// shared configuration files. If not provided "default" will be used as the
    +	// profile name.
    +	//
    +	//	AWS_PROFILE=my_profile
    +	//
    +	//	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
    +	//	# and AWS_PROFILE is not also set.
    +	//	AWS_DEFAULT_PROFILE=my_profile
    +	Profile string
    +
    +	// SDK load config instructs the SDK to load the shared config in addition to
    +	// shared credentials. This also expands the configuration loaded from the shared
    +	// credentials to have parity with the shared config file. This also enables
    +	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
    +	// env values as well.
    +	//
    +	//	AWS_SDK_LOAD_CONFIG=1
    +	EnableSharedConfig bool
    +
    +	// Shared credentials file path can be set to instruct the SDK to use an alternate
    +	// file for the shared credentials. If not set the file will be loaded from
    +	// $HOME/.aws/credentials on Linux/Unix based systems, and
    +	// %USERPROFILE%\.aws\credentials on Windows.
    +	//
    +	//	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
    +	SharedCredentialsFile string
    +
    +	// Shared config file path can be set to instruct the SDK to use an alternate
    +	// file for the shared config. If not set the file will be loaded from
    +	// $HOME/.aws/config on Linux/Unix based systems, and
    +	// %USERPROFILE%\.aws\config on Windows.
    +	//
    +	//	AWS_CONFIG_FILE=$HOME/my_shared_config
    +	SharedConfigFile string
    +}
    +
    +var (
    +	credAccessEnvKey = []string{
    +		"AWS_ACCESS_KEY_ID",
    +		"AWS_ACCESS_KEY",
    +	}
    +	credSecretEnvKey = []string{
    +		"AWS_SECRET_ACCESS_KEY",
    +		"AWS_SECRET_KEY",
    +	}
    +	credSessionEnvKey = []string{
    +		"AWS_SESSION_TOKEN",
    +	}
    +
    +	regionEnvKeys = []string{
    +		"AWS_REGION",
    +		"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
    +	}
    +	profileEnvKeys = []string{
    +		"AWS_PROFILE",
    +		"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
    +	}
    +)
    +
    +// loadEnvConfig retrieves the SDK's environment configuration.
    +// See `envConfig` for the values that will be retrieved.
    +//
    +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
    +// the shared SDK config will be loaded in addition to the SDK's specific
    +// configuration values.
    +func loadEnvConfig() envConfig {
    +	enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
    +	return envConfigLoad(enableSharedConfig)
    +}
    +
    +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
    +// SDK shared config. See `envConfig` for the values that will be retrieved.
    +//
    +// Loads the shared configuration in addition to the SDK's specific configuration.
    +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
    +// environment variable is set.
    +func loadSharedEnvConfig() envConfig {
    +	return envConfigLoad(true)
    +}
    +
    +func envConfigLoad(enableSharedConfig bool) envConfig {
    +	cfg := envConfig{}
    +
    +	cfg.EnableSharedConfig = enableSharedConfig
    +
    +	setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
    +	setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
    +	setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
    +
    +	// Require logical grouping of credentials
    +	if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
    +		cfg.Creds = credentials.Value{}
    +	} else {
    +		cfg.Creds.ProviderName = "EnvConfigCredentials"
    +	}
    +
    +	regionKeys := regionEnvKeys
    +	profileKeys := profileEnvKeys
    +	if !cfg.EnableSharedConfig {
    +		regionKeys = regionKeys[:1]
    +		profileKeys = profileKeys[:1]
    +	}
    +
    +	setFromEnvVal(&cfg.Region, regionKeys)
    +	setFromEnvVal(&cfg.Profile, profileKeys)
    +
    +	cfg.SharedCredentialsFile = sharedCredentialsFilename()
    +	cfg.SharedConfigFile = sharedConfigFilename()
    +
    +	return cfg
    +}
    +
    +func setFromEnvVal(dst *string, keys []string) {
    +	for _, k := range keys {
    +		if v := os.Getenv(k); len(v) > 0 {
    +			*dst = v
    +			break
    +		}
    +	}
    +}
    +
    +func sharedCredentialsFilename() string {
    +	if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
    +		return name
    +	}
    +
    +	return filepath.Join(userHomeDir(), ".aws", "credentials")
    +}
    +
    +func sharedConfigFilename() string {
    +	if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
    +		return name
    +	}
    +
    +	return filepath.Join(userHomeDir(), ".aws", "config")
    +}
    +
    +func userHomeDir() string {
    +	homeDir := os.Getenv("HOME") // *nix
    +	if len(homeDir) == 0 {       // windows
    +		homeDir = os.Getenv("USERPROFILE")
    +	}
    +
    +	return homeDir
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
    new file mode 100644
    index 0000000..602f4e1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
    @@ -0,0 +1,393 @@
    +package session
    +
    +import (
    +	"fmt"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/client"
    +	"github.com/aws/aws-sdk-go/aws/corehandlers"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    +	"github.com/aws/aws-sdk-go/aws/defaults"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/endpoints"
    +)
    +
    +// A Session provides a central location to create service clients from and
    +// store configurations and request handlers for those services.
    +//
    +// Sessions are safe to create service clients concurrently, but it is not safe
    +// to mutate the Session concurrently.
    +//
    +// The Session satisfies the service client's client.ClientConfigProvider.
    +type Session struct {
    +	Config   *aws.Config
    +	Handlers request.Handlers
    +}
    +
    +// New creates a new instance of the handlers merging in the provided configs
    +// on top of the SDK's default configurations. Once the Session is created it
    +// can be mutated to modify the Config or Handlers. The Session is safe to be
    +// read concurrently, but it should not be written to concurrently.
    +//
    +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
    +// method could now encounter an error when loading the configuration. When
    +// The environment variable is set, and an error occurs, New will return a
    +// session that will fail all requests reporting the error that occurred while
    +// loading the session. Use NewSession to get the error when creating the
    +// session.
    +//
    +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
    +// the shared config file (~/.aws/config) will also be loaded, in addition to
    +// the shared credentials file (~/.aws/credentials). Values set in both the
    +// shared config, and shared credentials will be taken from the shared
    +// credentials file.
    +//
    +// Deprecated: Use NewSession functions to create sessions instead. NewSession
    +// has the same functionality as New except an error can be returned when the
    +// func is called instead of waiting to receive an error until a request is made.
    +func New(cfgs ...*aws.Config) *Session {
    +	// load initial config from environment
    +	envCfg := loadEnvConfig()
    +
    +	if envCfg.EnableSharedConfig {
    +		s, err := newSession(envCfg, cfgs...)
    +		if err != nil {
    +			// Old session.New expected all errors to be discovered when
    +			// a request is made, and would report the errors then. This
    +			// needs to be replicated if an error occurs while creating
    +			// the session.
    +			msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
    +				"Use session.NewSession to handle errors occurring during session creation."
    +
    +			// Session creation failed, need to report the error and prevent
    +			// any requests from succeeding.
    +			s = &Session{Config: defaults.Config()}
    +			s.Config.MergeIn(cfgs...)
    +			s.Config.Logger.Log("ERROR:", msg, "Error:", err)
    +			s.Handlers.Validate.PushBack(func(r *request.Request) {
    +				r.Error = err
    +			})
    +		}
    +		return s
    +	}
    +
    +	return oldNewSession(cfgs...)
    +}
    +
    +// NewSession returns a new Session created from SDK defaults, config files,
    +// environment, and user provided config files. Once the Session is created
    +// it can be mutated to modify the Config or Handlers. The Session is safe to
    +// be read concurrently, but it should not be written to concurrently.
    +//
    +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
    +// the shared config file (~/.aws/config) will also be loaded in addition to
    +// the shared credentials file (~/.aws/credentials). Values set in both the
    +// shared config, and shared credentials will be taken from the shared
    +// credentials file. Enabling the Shared Config will also allow the Session
    +// to be built with retrieving credentials with AssumeRole set in the config.
    +//
    +// See the NewSessionWithOptions func for information on how to override or
    +// control through code how the Session will be created. Such as specifying the
    +// config profile, and controlling if shared config is enabled or not.
    +func NewSession(cfgs ...*aws.Config) (*Session, error) {
    +	envCfg := loadEnvConfig()
    +
    +	return newSession(envCfg, cfgs...)
    +}
    +
    +// SharedConfigState provides the ability to optionally override the state
    +// of the session's creation based on the shared config being enabled or
    +// disabled.
    +type SharedConfigState int
    +
    +const (
    +	// SharedConfigStateFromEnv does not override any state of the
    +	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
    +	// SharedConfigState type.
    +	SharedConfigStateFromEnv SharedConfigState = iota
    +
    +	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
    +	// and disables the shared config functionality.
    +	SharedConfigDisable
    +
    +	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
    +	// and enables the shared config functionality.
    +	SharedConfigEnable
    +)
    +
    +// Options provides the means to control how a Session is created and what
    +// configuration values will be loaded.
    +//
    +type Options struct {
    +	// Provides config values for the SDK to use when creating service clients
    +	// and making API requests to services. Any value set in with this field
    +	// will override the associated value provided by the SDK defaults,
    +	// environment or config files where relevant.
    +	//
    +	// If not set, configuration values from SDK defaults, environment,
    +	// config will be used.
    +	Config aws.Config
    +
    +	// Overrides the config profile the Session should be created from. If not
    +	// set the value of the environment variable will be loaded (AWS_PROFILE,
    +	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
    +	//
    +	// If not set and environment variables are not set the "default"
    +	// (DefaultSharedConfigProfile) will be used as the profile to load the
    +	// session config from.
    +	Profile string
    +
    +	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
    +	// environment variable. By default a Session will be created using the
    +	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
    +	//
    +	// Setting this value to SharedConfigEnable or SharedConfigDisable
    +	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
    +	// and enable or disable the shared config functionality.
    +	SharedConfigState SharedConfigState
    +}
    +
    +// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
    +// environment, and user provided config files. This func uses the Options
    +// values to configure how the Session is created.
    +//
    +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
    +// the shared config file (~/.aws/config) will also be loaded in addition to
    +// the shared credentials file (~/.aws/credentials). Values set in both the
    +// shared config, and shared credentials will be taken from the shared
    +// credentials file. Enabling the Shared Config will also allow the Session
    +// to be built with retrieving credentials with AssumeRole set in the config.
    +//
    +//     // Equivalent to session.New
    +//     sess, err := session.NewSessionWithOptions(session.Options{})
    +//
    +//     // Specify profile to load for the session's config
    +//     sess, err := session.NewSessionWithOptions(session.Options{
    +//          Profile: "profile_name",
    +//     })
    +//
    +//     // Specify profile for config and region for requests
    +//     sess, err := session.NewSessionWithOptions(session.Options{
    +//          Config: aws.Config{Region: aws.String("us-east-1")},
    +//          Profile: "profile_name",
    +//     })
    +//
    +//     // Force enable Shared Config support
    +//     sess, err := session.NewSessionWithOptions(session.Options{
    +//         SharedConfigState: SharedConfigEnable,
    +//     })
    +func NewSessionWithOptions(opts Options) (*Session, error) {
    +	var envCfg envConfig
    +	if opts.SharedConfigState == SharedConfigEnable {
    +		envCfg = loadSharedEnvConfig()
    +	} else {
    +		envCfg = loadEnvConfig()
    +	}
    +
    +	if len(opts.Profile) > 0 {
    +		envCfg.Profile = opts.Profile
    +	}
    +
    +	switch opts.SharedConfigState {
    +	case SharedConfigDisable:
    +		envCfg.EnableSharedConfig = false
    +	case SharedConfigEnable:
    +		envCfg.EnableSharedConfig = true
    +	}
    +
    +	return newSession(envCfg, &opts.Config)
    +}
    +
    +// Must is a helper function to ensure the Session is valid and there was no
    +// error when calling a NewSession function.
    +//
    +// This helper is intended to be used in variable initialization to load the
    +// Session and configuration at startup. Such as:
    +//
    +//     var sess = session.Must(session.NewSession())
    +func Must(sess *Session, err error) *Session {
    +	if err != nil {
    +		panic(err)
    +	}
    +
    +	return sess
    +}
    +
    +func oldNewSession(cfgs ...*aws.Config) *Session {
    +	cfg := defaults.Config()
    +	handlers := defaults.Handlers()
    +
    +	// Apply the passed in configs so the configuration can be applied to the
    +	// default credential chain
    +	cfg.MergeIn(cfgs...)
    +	cfg.Credentials = defaults.CredChain(cfg, handlers)
    +
    +	// Reapply any passed in configs to override credentials if set
    +	cfg.MergeIn(cfgs...)
    +
    +	s := &Session{
    +		Config:   cfg,
    +		Handlers: handlers,
    +	}
    +
    +	initHandlers(s)
    +
    +	return s
    +}
    +
    +func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
    +	cfg := defaults.Config()
    +	handlers := defaults.Handlers()
    +
    +	// Get a merged version of the user provided config to determine if
    +	// credentials were.
    +	userCfg := &aws.Config{}
    +	userCfg.MergeIn(cfgs...)
    +
    +	// Order config files will be loaded in with later files overwriting
    +	// previous config file values.
    +	cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
    +	if !envCfg.EnableSharedConfig {
    +		// The shared config file (~/.aws/config) is only loaded if instructed
    +		// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
    +		cfgFiles = cfgFiles[1:]
    +	}
    +
    +	// Load additional config from file(s)
    +	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers)
    +
    +	s := &Session{
    +		Config:   cfg,
    +		Handlers: handlers,
    +	}
    +
    +	initHandlers(s)
    +
    +	return s, nil
    +}
    +
    +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) {
    +	// Merge in user provided configuration
    +	cfg.MergeIn(userCfg)
    +
    +	// Region if not already set by user
    +	if len(aws.StringValue(cfg.Region)) == 0 {
    +		if len(envCfg.Region) > 0 {
    +			cfg.WithRegion(envCfg.Region)
    +		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
    +			cfg.WithRegion(sharedCfg.Region)
    +		}
    +	}
    +
    +	// Configure credentials if not already set
    +	if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
    +		if len(envCfg.Creds.AccessKeyID) > 0 {
    +			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
    +				envCfg.Creds,
    +			)
    +		} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
    +			cfgCp := *cfg
    +			cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
    +				sharedCfg.AssumeRoleSource.Creds,
    +			)
    +			cfg.Credentials = stscreds.NewCredentials(
    +				&Session{
    +					Config:   &cfgCp,
    +					Handlers: handlers.Copy(),
    +				},
    +				sharedCfg.AssumeRole.RoleARN,
    +				func(opt *stscreds.AssumeRoleProvider) {
    +					opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
    +
    +					if len(sharedCfg.AssumeRole.ExternalID) > 0 {
    +						opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
    +					}
    +
    +					// MFA not supported
    +				},
    +			)
    +		} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
    +			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
    +				sharedCfg.Creds,
    +			)
    +		} else {
    +			// Fallback to default credentials provider, include mock errors
    +			// for the credential chain so user can identify why credentials
    +			// failed to be retrieved.
    +			cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
    +				VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
    +				Providers: []credentials.Provider{
    +					&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
    +					&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
    +					defaults.RemoteCredProvider(*cfg, handlers),
    +				},
    +			})
    +		}
    +	}
    +}
    +
    +type credProviderError struct {
    +	Err error
    +}
    +
    +var emptyCreds = credentials.Value{}
    +
    +func (c credProviderError) Retrieve() (credentials.Value, error) {
    +	return credentials.Value{}, c.Err
    +}
    +func (c credProviderError) IsExpired() bool {
    +	return true
    +}
    +
    +func initHandlers(s *Session) {
    +	// Add the Validate parameter handler if it is not disabled.
    +	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
    +	if !aws.BoolValue(s.Config.DisableParamValidation) {
    +		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
    +	}
    +}
    +
    +// Copy creates and returns a copy of the current Session, copying the config
    +// and handlers. If any additional configs are provided they will be merged
    +// on top of the Session's copied config.
    +//
    +//     // Create a copy of the current Session, configured for the us-west-2 region.
    +//     sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
    +func (s *Session) Copy(cfgs ...*aws.Config) *Session {
    +	newSession := &Session{
    +		Config:   s.Config.Copy(cfgs...),
    +		Handlers: s.Handlers.Copy(),
    +	}
    +
    +	initHandlers(newSession)
    +
    +	return newSession
    +}
    +
    +// ClientConfig satisfies the client.ConfigProvider interface and is used to
    +// configure the service client instances. Passing the Session to the service
    +// client's constructor (New) will use this method to configure the client.
    +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
    +	s = s.Copy(cfgs...)
    +	endpoint, signingRegion := endpoints.NormalizeEndpoint(
    +		aws.StringValue(s.Config.Endpoint),
    +		serviceName,
    +		aws.StringValue(s.Config.Region),
    +		aws.BoolValue(s.Config.DisableSSL),
    +		aws.BoolValue(s.Config.UseDualStack),
    +	)
    +
    +	return client.Config{
    +		Config:        s.Config,
    +		Handlers:      s.Handlers,
    +		Endpoint:      endpoint,
    +		SigningRegion: signingRegion,
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
    new file mode 100644
    index 0000000..b58076f
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
    @@ -0,0 +1,295 @@
    +package session
    +
    +import (
    +	"fmt"
    +	"io/ioutil"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/go-ini/ini"
    +)
    +
    +const (
    +	// Static Credentials group
    +	accessKeyIDKey  = `aws_access_key_id`     // group required
    +	secretAccessKey = `aws_secret_access_key` // group required
    +	sessionTokenKey = `aws_session_token`     // optional
    +
    +	// Assume Role Credentials group
    +	roleArnKey         = `role_arn`          // group required
    +	sourceProfileKey   = `source_profile`    // group required
    +	externalIDKey      = `external_id`       // optional
    +	mfaSerialKey       = `mfa_serial`        // optional
    +	roleSessionNameKey = `role_session_name` // optional
    +
    +	// Additional Config fields
    +	regionKey = `region`
    +
    +	// DefaultSharedConfigProfile is the default profile to be used when
    +	// loading configuration from the config files if another profile name
    +	// is not provided.
    +	DefaultSharedConfigProfile = `default`
    +)
    +
    +type assumeRoleConfig struct {
    +	RoleARN         string
    +	SourceProfile   string
    +	ExternalID      string
    +	MFASerial       string
    +	RoleSessionName string
    +}
    +
    +// sharedConfig represents the configuration fields of the SDK config files.
    +type sharedConfig struct {
    +	// Credentials values from the config file. Both aws_access_key_id
    +	// and aws_secret_access_key must be provided together in the same file
    +	// to be considered valid. The values will be ignored if not a complete group.
    +	// aws_session_token is an optional field that can be provided if both of the
    +	// other two fields are also provided.
    +	//
    +	//	aws_access_key_id
    +	//	aws_secret_access_key
    +	//	aws_session_token
    +	Creds credentials.Value
    +
    +	AssumeRole       assumeRoleConfig
    +	AssumeRoleSource *sharedConfig
    +
    +	// Region is the region the SDK should use for looking up AWS service endpoints
    +	// and signing requests.
    +	//
    +	//	region
    +	Region string
    +}
    +
    +type sharedConfigFile struct {
    +	Filename string
    +	IniData  *ini.File
    +}
    +
    +// loadSharedConfig retrieves the configuration from the list of files
    +// using the profile provided. The order the files are listed will determine
    +// precedence. Values in subsequent files will overwrite values defined in
    +// earlier files.
    +//
    +// For example, given two files A and B. Both define credentials. If the order
    +// of the files are A then B, B's credential values will be used instead of A's.
    +//
    +// See sharedConfig.setFromFile for information how the config files
    +// will be loaded.
    +func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
    +	if len(profile) == 0 {
    +		profile = DefaultSharedConfigProfile
    +	}
    +
    +	files, err := loadSharedConfigIniFiles(filenames)
    +	if err != nil {
    +		return sharedConfig{}, err
    +	}
    +
    +	cfg := sharedConfig{}
    +	if err = cfg.setFromIniFiles(profile, files); err != nil {
    +		return sharedConfig{}, err
    +	}
    +
    +	if len(cfg.AssumeRole.SourceProfile) > 0 {
    +		if err := cfg.setAssumeRoleSource(profile, files); err != nil {
    +			return sharedConfig{}, err
    +		}
    +	}
    +
    +	return cfg, nil
    +}
    +
    +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
    +	files := make([]sharedConfigFile, 0, len(filenames))
    +
    +	for _, filename := range filenames {
    +		b, err := ioutil.ReadFile(filename)
    +		if err != nil {
    +			// Skip files which can't be opened and read for whatever reason
    +			continue
    +		}
    +
    +		f, err := ini.Load(b)
    +		if err != nil {
    +			return nil, SharedConfigLoadError{Filename: filename}
    +		}
    +
    +		files = append(files, sharedConfigFile{
    +			Filename: filename, IniData: f,
    +		})
    +	}
    +
    +	return files, nil
    +}
    +
    +func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
    +	var assumeRoleSrc sharedConfig
    +
    +	// Multiple level assume role chains are not supported
    +	if cfg.AssumeRole.SourceProfile == origProfile {
    +		assumeRoleSrc = *cfg
    +		assumeRoleSrc.AssumeRole = assumeRoleConfig{}
    +	} else {
    +		err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
    +		if err != nil {
    +			return err
    +		}
    +	}
    +
    +	if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
    +		return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
    +	}
    +
    +	cfg.AssumeRoleSource = &assumeRoleSrc
    +
    +	return nil
    +}
    +
    +func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
    +	// Trim files from the list that don't exist.
    +	for _, f := range files {
    +		if err := cfg.setFromIniFile(profile, f); err != nil {
    +			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
    +				// Ignore missing profiles
    +				continue
    +			}
    +			return err
    +		}
    +	}
    +
    +	return nil
    +}
    +
    +// setFromFile loads the configuration from the file using
    +// the profile provided. A sharedConfig pointer type value is used so that
    +// multiple config file loadings can be chained.
    +//
    +// Only loads complete logically grouped values, and will not set fields in cfg
    +// for incomplete grouped values in the config. Such as credentials. For example
    +// if a config file only includes aws_access_key_id but no aws_secret_access_key
    +// the aws_access_key_id will be ignored.
    +func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
    +	section, err := file.IniData.GetSection(profile)
    +	if err != nil {
    +		// Fall back to the alternate profile section name: "profile <name>"
    +		section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
    +		if err != nil {
    +			return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
    +		}
    +	}
    +
    +	// Shared Credentials
    +	akid := section.Key(accessKeyIDKey).String()
    +	secret := section.Key(secretAccessKey).String()
    +	if len(akid) > 0 && len(secret) > 0 {
    +		cfg.Creds = credentials.Value{
    +			AccessKeyID:     akid,
    +			SecretAccessKey: secret,
    +			SessionToken:    section.Key(sessionTokenKey).String(),
    +			ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
    +		}
    +	}
    +
    +	// Assume Role
    +	roleArn := section.Key(roleArnKey).String()
    +	srcProfile := section.Key(sourceProfileKey).String()
    +	if len(roleArn) > 0 && len(srcProfile) > 0 {
    +		cfg.AssumeRole = assumeRoleConfig{
    +			RoleARN:         roleArn,
    +			SourceProfile:   srcProfile,
    +			ExternalID:      section.Key(externalIDKey).String(),
    +			MFASerial:       section.Key(mfaSerialKey).String(),
    +			RoleSessionName: section.Key(roleSessionNameKey).String(),
    +		}
    +	}
    +
    +	// Region
    +	if v := section.Key(regionKey).String(); len(v) > 0 {
    +		cfg.Region = v
    +	}
    +
    +	return nil
    +}
    +
    +// SharedConfigLoadError is an error for the shared config file failed to load.
    +type SharedConfigLoadError struct {
    +	Filename string
    +	Err      error
    +}
    +
    +// Code is the short id of the error.
    +func (e SharedConfigLoadError) Code() string {
    +	return "SharedConfigLoadError"
    +}
    +
    +// Message is the description of the error
    +func (e SharedConfigLoadError) Message() string {
    +	return fmt.Sprintf("failed to load config file, %s", e.Filename)
    +}
    +
    +// OrigErr is the underlying error that caused the failure.
    +func (e SharedConfigLoadError) OrigErr() error {
    +	return e.Err
    +}
    +
    +// Error satisfies the error interface.
    +func (e SharedConfigLoadError) Error() string {
    +	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
    +}
    +
    +// SharedConfigProfileNotExistsError is an error for the shared config when
    +// the profile was not found in the config file.
    +type SharedConfigProfileNotExistsError struct {
    +	Profile string
    +	Err     error
    +}
    +
    +// Code is the short id of the error.
    +func (e SharedConfigProfileNotExistsError) Code() string {
    +	return "SharedConfigProfileNotExistsError"
    +}
    +
    +// Message is the description of the error
    +func (e SharedConfigProfileNotExistsError) Message() string {
    +	return fmt.Sprintf("failed to get profile, %s", e.Profile)
    +}
    +
    +// OrigErr is the underlying error that caused the failure.
    +func (e SharedConfigProfileNotExistsError) OrigErr() error {
    +	return e.Err
    +}
    +
    +// Error satisfies the error interface.
    +func (e SharedConfigProfileNotExistsError) Error() string {
    +	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
    +}
    +
    +// SharedConfigAssumeRoleError is an error for the shared config when the
    +// profile contains assume role information, but that information is invalid
    +// or not complete.
    +type SharedConfigAssumeRoleError struct {
    +	RoleARN string
    +}
    +
    +// Code is the short id of the error.
    +func (e SharedConfigAssumeRoleError) Code() string {
    +	return "SharedConfigAssumeRoleError"
    +}
    +
    +// Message is the description of the error
    +func (e SharedConfigAssumeRoleError) Message() string {
    +	return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
    +		e.RoleARN)
    +}
    +
    +// OrigErr is the underlying error that caused the failure.
    +func (e SharedConfigAssumeRoleError) OrigErr() error {
    +	return nil
    +}
    +
    +// Error satisfies the error interface.
    +func (e SharedConfigAssumeRoleError) Error() string {
    +	return awserr.SprintError(e.Code(), e.Message(), "", nil)
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
    new file mode 100644
    index 0000000..244c86d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
    @@ -0,0 +1,82 @@
    +package v4
    +
    +import (
    +	"net/http"
    +	"strings"
    +)
    +
+// validator houses a set of rules needed for validation of a
    +// string value
    +type rules []rule
    +
    +// rule interface allows for more flexible rules and just simply
    +// checks whether or not a value adheres to that rule
    +type rule interface {
    +	IsValid(value string) bool
    +}
    +
    +// IsValid will iterate through all rules and see if any rules
    +// apply to the value and supports nested rules
    +func (r rules) IsValid(value string) bool {
    +	for _, rule := range r {
    +		if rule.IsValid(value) {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// mapRule generic rule for maps
    +type mapRule map[string]struct{}
    +
    +// IsValid for the map rule satisfies whether it exists in the map
    +func (m mapRule) IsValid(value string) bool {
    +	_, ok := m[value]
    +	return ok
    +}
    +
    +// whitelist is a generic rule for whitelisting
    +type whitelist struct {
    +	rule
    +}
    +
    +// IsValid for whitelist checks if the value is within the whitelist
    +func (w whitelist) IsValid(value string) bool {
    +	return w.rule.IsValid(value)
    +}
    +
    +// blacklist is a generic rule for blacklisting
    +type blacklist struct {
    +	rule
    +}
    +
+// IsValid for blacklist checks that the value is not within the blacklist
    +func (b blacklist) IsValid(value string) bool {
    +	return !b.rule.IsValid(value)
    +}
    +
    +type patterns []string
    +
    +// IsValid for patterns checks each pattern and returns if a match has
    +// been found
    +func (p patterns) IsValid(value string) bool {
    +	for _, pattern := range p {
    +		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// inclusiveRules rules allow for rules to depend on one another
    +type inclusiveRules []rule
    +
    +// IsValid will return true if all rules are true
    +func (r inclusiveRules) IsValid(value string) bool {
    +	for _, rule := range r {
    +		if !rule.IsValid(value) {
    +			return false
    +		}
    +	}
    +	return true
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
    new file mode 100644
    index 0000000..bd082e9
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
    @@ -0,0 +1,24 @@
    +// +build go1.5
    +
    +package v4
    +
    +import (
    +	"net/url"
    +	"strings"
    +)
    +
    +func getURIPath(u *url.URL) string {
    +	var uri string
    +
    +	if len(u.Opaque) > 0 {
    +		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
    +	} else {
    +		uri = u.EscapedPath()
    +	}
    +
    +	if len(uri) == 0 {
    +		uri = "/"
    +	}
    +
    +	return uri
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
    new file mode 100644
    index 0000000..7966041
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
    @@ -0,0 +1,24 @@
    +// +build !go1.5
    +
    +package v4
    +
    +import (
    +	"net/url"
    +	"strings"
    +)
    +
    +func getURIPath(u *url.URL) string {
    +	var uri string
    +
    +	if len(u.Opaque) > 0 {
    +		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
    +	} else {
    +		uri = u.Path
    +	}
    +
    +	if len(uri) == 0 {
    +		uri = "/"
    +	}
    +
    +	return uri
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
    new file mode 100644
    index 0000000..986530b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
    @@ -0,0 +1,713 @@
    +// Package v4 implements signing for AWS V4 signer
    +//
    +// Provides request signing for request that need to be signed with
    +// AWS V4 Signatures.
    +//
    +// Standalone Signer
    +//
    +// Generally using the signer outside of the SDK should not require any additional
    +// logic when using Go v1.5 or higher. The signer does this by taking advantage
    +// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
    +// to the service as.
    +//
    +// The signer will first check the URL.Opaque field, and use its value if set.
    +// The signer does require the URL.Opaque field to be set in the form of:
    +//
    +//     "///"
    +//
    +//     // e.g.
    +//     "//example.com/some/path"
    +//
    +// The leading "//" and hostname are required or the URL.Opaque escaping will
    +// not work correctly.
    +//
    +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
    +// method and using the returned value. If you're using Go v1.4 you must set
    +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
    +// Go v1.5 the signer will fallback to URL.Path.
    +//
    +// AWS v4 signature validation requires that the canonical string's URI path
    +// element must be the URI escaped form of the HTTP request's path.
    +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    +//
    +// The Go HTTP client will perform escaping automatically on the request. Some
    +// of these escaping may cause signature validation errors because the HTTP
    +// request differs from the URI path or query that the signature was generated.
    +// https://golang.org/pkg/net/url/#URL.EscapedPath
    +//
    +// Because of this, it is recommended that when using the signer outside of the
    +// SDK that explicitly escaping the request prior to being signed is preferable,
    +// and will help prevent signature validation errors. This can be done by setting
    +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
    +// call URL.EscapedPath() if Opaque is not set.
    +//
    +// Test `TestStandaloneSign` provides a complete example of using the signer
    +// outside of the SDK and pre-escaping the URI path.
    +package v4
    +
    +import (
    +	"bytes"
    +	"crypto/hmac"
    +	"crypto/sha256"
    +	"encoding/hex"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"net/url"
    +	"sort"
    +	"strconv"
    +	"strings"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/credentials"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/protocol/rest"
    +)
    +
    +const (
    +	authHeaderPrefix = "AWS4-HMAC-SHA256"
    +	timeFormat       = "20060102T150405Z"
    +	shortTimeFormat  = "20060102"
    +
    +	// emptyStringSHA256 is a SHA256 of an empty string
    +	emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
    +)
    +
    +var ignoredHeaders = rules{
    +	blacklist{
    +		mapRule{
    +			"Authorization": struct{}{},
    +			"User-Agent":    struct{}{},
    +		},
    +	},
    +}
    +
    +// requiredSignedHeaders is a whitelist for build canonical headers.
    +var requiredSignedHeaders = rules{
    +	whitelist{
    +		mapRule{
    +			"Cache-Control":                                               struct{}{},
    +			"Content-Disposition":                                         struct{}{},
    +			"Content-Encoding":                                            struct{}{},
    +			"Content-Language":                                            struct{}{},
    +			"Content-Md5":                                                 struct{}{},
    +			"Content-Type":                                                struct{}{},
    +			"Expires":                                                     struct{}{},
    +			"If-Match":                                                    struct{}{},
    +			"If-Modified-Since":                                           struct{}{},
    +			"If-None-Match":                                               struct{}{},
    +			"If-Unmodified-Since":                                         struct{}{},
    +			"Range":                                                       struct{}{},
    +			"X-Amz-Acl":                                                   struct{}{},
    +			"X-Amz-Copy-Source":                                           struct{}{},
    +			"X-Amz-Copy-Source-If-Match":                                  struct{}{},
    +			"X-Amz-Copy-Source-If-Modified-Since":                         struct{}{},
    +			"X-Amz-Copy-Source-If-None-Match":                             struct{}{},
    +			"X-Amz-Copy-Source-If-Unmodified-Since":                       struct{}{},
    +			"X-Amz-Copy-Source-Range":                                     struct{}{},
    +			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
    +			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
    +			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
    +			"X-Amz-Grant-Full-control":                                    struct{}{},
    +			"X-Amz-Grant-Read":                                            struct{}{},
    +			"X-Amz-Grant-Read-Acp":                                        struct{}{},
    +			"X-Amz-Grant-Write":                                           struct{}{},
    +			"X-Amz-Grant-Write-Acp":                                       struct{}{},
    +			"X-Amz-Metadata-Directive":                                    struct{}{},
    +			"X-Amz-Mfa":                                                   struct{}{},
    +			"X-Amz-Request-Payer":                                         struct{}{},
    +			"X-Amz-Server-Side-Encryption":                                struct{}{},
    +			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
    +			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
    +			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
    +			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
    +			"X-Amz-Storage-Class":                                         struct{}{},
    +			"X-Amz-Website-Redirect-Location":                             struct{}{},
    +		},
    +	},
    +	patterns{"X-Amz-Meta-"},
    +}
    +
+// allowedQueryHoisting is a whitelist for building query headers. The boolean value
    +// represents whether or not it is a pattern.
    +var allowedQueryHoisting = inclusiveRules{
    +	blacklist{requiredSignedHeaders},
    +	patterns{"X-Amz-"},
    +}
    +
    +// Signer applies AWS v4 signing to given request. Use this to sign requests
    +// that need to be signed with AWS V4 Signatures.
    +type Signer struct {
    +	// The authentication credentials the request will be signed against.
    +	// This value must be set to sign requests.
    +	Credentials *credentials.Credentials
    +
    +	// Sets the log level the signer should use when reporting information to
    +	// the logger. If the logger is nil nothing will be logged. See
    +	// aws.LogLevelType for more information on available logging levels
    +	//
    +	// By default nothing will be logged.
    +	Debug aws.LogLevelType
    +
+	// The logger logging information will be written to. If the logger
    +	// is nil, nothing will be logged.
    +	Logger aws.Logger
    +
    +	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
    +	// request header to the request's query string. This is most commonly used
    +	// with pre-signed requests preventing headers from being added to the
    +	// request's query string.
    +	DisableHeaderHoisting bool
    +
    +	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
    +	// escaping then use this to disable the signer escaping the path.
    +	//
    +	// S3 is an example of a service that does not need additional escaping.
    +	//
    +	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    +	DisableURIPathEscaping bool
    +
    +	// currentTimeFn returns the time value which represents the current time.
    +	// This value should only be used for testing. If it is nil the default
    +	// time.Now will be used.
    +	currentTimeFn func() time.Time
    +}
    +
    +// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
    +// default configuration.
    +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
    +	v4 := &Signer{
    +		Credentials: credentials,
    +	}
    +
    +	for _, option := range options {
    +		option(v4)
    +	}
    +
    +	return v4
    +}
    +
    +type signingCtx struct {
    +	ServiceName      string
    +	Region           string
    +	Request          *http.Request
    +	Body             io.ReadSeeker
    +	Query            url.Values
    +	Time             time.Time
    +	ExpireTime       time.Duration
    +	SignedHeaderVals http.Header
    +
    +	DisableURIPathEscaping bool
    +
    +	credValues         credentials.Value
    +	isPresign          bool
    +	formattedTime      string
    +	formattedShortTime string
    +
    +	bodyDigest       string
    +	signedHeaders    string
    +	canonicalHeaders string
    +	canonicalString  string
    +	credentialString string
    +	stringToSign     string
    +	signature        string
    +	authorization    string
    +}
    +
    +// Sign signs AWS v4 requests with the provided body, service name, region the
    +// request is made to, and time the request is signed at. The signTime allows
    +// you to specify that a request is signed for the future, and cannot be
    +// used until then.
    +//
    +// Returns a list of HTTP headers that were included in the signature or an
    +// error if signing the request failed. Generally for signed requests this value
    +// is not needed as the full request context will be captured by the http.Request
    +// value. It is included for reference though.
    +//
    +// Sign will set the request's Body to be the `body` parameter passed in. If
    +// the body is not already an io.ReadCloser, it will be wrapped within one. If
    +// a `nil` body parameter passed to Sign, the request's Body field will be
+// also set to nil. It's important to note that this functionality will not
    +// change the request's ContentLength of the request.
    +//
    +// Sign differs from Presign in that it will sign the request using HTTP
    +// header values. This type of signing is intended for http.Request values that
    +// will not be shared, or are shared in a way the header values on the request
    +// will not be lost.
    +//
    +// The requests body is an io.ReadSeeker so the SHA256 of the body can be
    +// generated. To bypass the signer computing the hash you can set the
    +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
    +// only compute the hash if the request header value is empty.
    +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
    +	return v4.signWithBody(r, body, service, region, 0, signTime)
    +}
    +
    +// Presign signs AWS v4 requests with the provided body, service name, region
    +// the request is made to, and time the request is signed at. The signTime
    +// allows you to specify that a request is signed for the future, and cannot
    +// be used until then.
    +//
    +// Returns a list of HTTP headers that were included in the signature or an
    +// error if signing the request failed. For presigned requests these headers
    +// and their values must be included on the HTTP request when it is made. This
    +// is helpful to know what header values need to be shared with the party the
    +// presigned request will be distributed to.
    +//
    +// Presign differs from Sign in that it will sign the request using query string
    +// instead of header values. This allows you to share the Presigned Request's
    +// URL with third parties, or distribute it throughout your system with minimal
    +// dependencies.
    +//
    +// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
    +// set when the request will expire.
    +//
    +// The requests body is an io.ReadSeeker so the SHA256 of the body can be
    +// generated. To bypass the signer computing the hash you can set the
    +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
    +// only compute the hash if the request header value is empty.
    +//
    +// Presigning a S3 request will not compute the body's SHA256 hash by default.
    +// This is done due to the general use case for S3 presigned URLs is to share
    +// PUT/GET capabilities. If you would like to include the body's SHA256 in the
    +// presigned request's signature you can set the "X-Amz-Content-Sha256"
    +// HTTP header and that will be included in the request's signature.
    +func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
    +	return v4.signWithBody(r, body, service, region, exp, signTime)
    +}
    +
    +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
    +	currentTimeFn := v4.currentTimeFn
    +	if currentTimeFn == nil {
    +		currentTimeFn = time.Now
    +	}
    +
    +	ctx := &signingCtx{
    +		Request:                r,
    +		Body:                   body,
    +		Query:                  r.URL.Query(),
    +		Time:                   signTime,
    +		ExpireTime:             exp,
    +		isPresign:              exp != 0,
    +		ServiceName:            service,
    +		Region:                 region,
    +		DisableURIPathEscaping: v4.DisableURIPathEscaping,
    +	}
    +
    +	if ctx.isRequestSigned() {
    +		ctx.Time = currentTimeFn()
    +		ctx.handlePresignRemoval()
    +	}
    +
    +	var err error
    +	ctx.credValues, err = v4.Credentials.Get()
    +	if err != nil {
    +		return http.Header{}, err
    +	}
    +
    +	ctx.assignAmzQueryValues()
    +	ctx.build(v4.DisableHeaderHoisting)
    +
    +	// If the request is not presigned the body should be attached to it. This
    +	// prevents the confusion of wanting to send a signed request without
    +	// the body the request was signed for attached.
    +	if !ctx.isPresign {
    +		var reader io.ReadCloser
    +		if body != nil {
    +			var ok bool
    +			if reader, ok = body.(io.ReadCloser); !ok {
    +				reader = ioutil.NopCloser(body)
    +			}
    +		}
    +		r.Body = reader
    +	}
    +
    +	if v4.Debug.Matches(aws.LogDebugWithSigning) {
    +		v4.logSigningInfo(ctx)
    +	}
    +
    +	return ctx.SignedHeaderVals, nil
    +}
    +
    +func (ctx *signingCtx) handlePresignRemoval() {
    +	if !ctx.isPresign {
    +		return
    +	}
    +
    +	// The credentials have expired for this request. The current signing
+	// is invalid, and the request needs to be re-signed or it will fail.
    +	ctx.removePresign()
    +
    +	// Update the request's query string to ensure the values stays in
    +	// sync in the case retrieving the new credentials fails.
    +	ctx.Request.URL.RawQuery = ctx.Query.Encode()
    +}
    +
    +func (ctx *signingCtx) assignAmzQueryValues() {
    +	if ctx.isPresign {
    +		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
    +		if ctx.credValues.SessionToken != "" {
    +			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
    +		} else {
    +			ctx.Query.Del("X-Amz-Security-Token")
    +		}
    +
    +		return
    +	}
    +
    +	if ctx.credValues.SessionToken != "" {
    +		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
    +	}
    +}
    +
    +// SignRequestHandler is a named request handler the SDK will use to sign
    +// service client request with using the V4 signature.
    +var SignRequestHandler = request.NamedHandler{
    +	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
    +}
    +
    +// SignSDKRequest signs an AWS request with the V4 signature. This
    +// request handler is bested used only with the SDK's built in service client's
    +// API operation requests.
    +//
+// This function should not be used on its own, but in conjunction with
    +// an AWS service client's API operation call. To sign a standalone request
    +// not created by a service client's API operation method use the "Sign" or
    +// "Presign" functions of the "Signer" type.
    +//
    +// If the credentials of the request's config are set to
    +// credentials.AnonymousCredentials the request will not be signed.
    +func SignSDKRequest(req *request.Request) {
    +	signSDKRequestWithCurrTime(req, time.Now)
    +}
    +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
    +	// If the request does not need to be signed ignore the signing of the
    +	// request if the AnonymousCredentials object is used.
    +	if req.Config.Credentials == credentials.AnonymousCredentials {
    +		return
    +	}
    +
    +	region := req.ClientInfo.SigningRegion
    +	if region == "" {
    +		region = aws.StringValue(req.Config.Region)
    +	}
    +
    +	name := req.ClientInfo.SigningName
    +	if name == "" {
    +		name = req.ClientInfo.ServiceName
    +	}
    +
    +	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
    +		v4.Debug = req.Config.LogLevel.Value()
    +		v4.Logger = req.Config.Logger
    +		v4.DisableHeaderHoisting = req.NotHoist
    +		v4.currentTimeFn = curTimeFn
    +		if name == "s3" {
    +			// S3 service should not have any escaping applied
    +			v4.DisableURIPathEscaping = true
    +		}
    +	})
    +
    +	signingTime := req.Time
    +	if !req.LastSignedAt.IsZero() {
    +		signingTime = req.LastSignedAt
    +	}
    +
    +	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
    +		name, region, req.ExpireTime, signingTime,
    +	)
    +	if err != nil {
    +		req.Error = err
    +		req.SignedHeaderVals = nil
    +		return
    +	}
    +
    +	req.SignedHeaderVals = signedHeaders
    +	req.LastSignedAt = curTimeFn()
    +}
    +
    +const logSignInfoMsg = `DEBUG: Request Signature:
    +---[ CANONICAL STRING  ]-----------------------------
    +%s
    +---[ STRING TO SIGN ]--------------------------------
    +%s%s
    +-----------------------------------------------------`
    +const logSignedURLMsg = `
    +---[ SIGNED URL ]------------------------------------
    +%s`
    +
    +func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
    +	signedURLMsg := ""
    +	if ctx.isPresign {
    +		signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
    +	}
    +	msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
    +	v4.Logger.Log(msg)
    +}
    +
    +func (ctx *signingCtx) build(disableHeaderHoisting bool) {
    +	ctx.buildTime()             // no depends
    +	ctx.buildCredentialString() // no depends
    +
    +	unsignedHeaders := ctx.Request.Header
    +	if ctx.isPresign {
    +		if !disableHeaderHoisting {
    +			urlValues := url.Values{}
    +			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
    +			for k := range urlValues {
    +				ctx.Query[k] = urlValues[k]
    +			}
    +		}
    +	}
    +
    +	ctx.buildBodyDigest()
    +	ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
    +	ctx.buildCanonicalString() // depends on canon headers / signed headers
    +	ctx.buildStringToSign()    // depends on canon string
    +	ctx.buildSignature()       // depends on string to sign
    +
    +	if ctx.isPresign {
    +		ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
    +	} else {
    +		parts := []string{
    +			authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
    +			"SignedHeaders=" + ctx.signedHeaders,
    +			"Signature=" + ctx.signature,
    +		}
    +		ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
    +	}
    +}
    +
    +func (ctx *signingCtx) buildTime() {
    +	ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
    +	ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
    +
    +	if ctx.isPresign {
    +		duration := int64(ctx.ExpireTime / time.Second)
    +		ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
    +		ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
    +	} else {
    +		ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
    +	}
    +}
    +
    +func (ctx *signingCtx) buildCredentialString() {
    +	ctx.credentialString = strings.Join([]string{
    +		ctx.formattedShortTime,
    +		ctx.Region,
    +		ctx.ServiceName,
    +		"aws4_request",
    +	}, "/")
    +
    +	if ctx.isPresign {
    +		ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
    +	}
    +}
    +
    +func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
    +	query := url.Values{}
    +	unsignedHeaders := http.Header{}
    +	for k, h := range header {
    +		if r.IsValid(k) {
    +			query[k] = h
    +		} else {
    +			unsignedHeaders[k] = h
    +		}
    +	}
    +
    +	return query, unsignedHeaders
    +}
    +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
    +	var headers []string
    +	headers = append(headers, "host")
    +	for k, v := range header {
    +		canonicalKey := http.CanonicalHeaderKey(k)
    +		if !r.IsValid(canonicalKey) {
    +			continue // ignored header
    +		}
    +		if ctx.SignedHeaderVals == nil {
    +			ctx.SignedHeaderVals = make(http.Header)
    +		}
    +
    +		lowerCaseKey := strings.ToLower(k)
    +		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
    +			// include additional values
    +			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
    +			continue
    +		}
    +
    +		headers = append(headers, lowerCaseKey)
    +		ctx.SignedHeaderVals[lowerCaseKey] = v
    +	}
    +	sort.Strings(headers)
    +
    +	ctx.signedHeaders = strings.Join(headers, ";")
    +
    +	if ctx.isPresign {
    +		ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
    +	}
    +
    +	headerValues := make([]string, len(headers))
    +	for i, k := range headers {
    +		if k == "host" {
    +			headerValues[i] = "host:" + ctx.Request.URL.Host
    +		} else {
    +			headerValues[i] = k + ":" +
    +				strings.Join(ctx.SignedHeaderVals[k], ",")
    +		}
    +	}
    +
    +	ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
    +}
    +
    +func (ctx *signingCtx) buildCanonicalString() {
    +	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
    +
    +	uri := getURIPath(ctx.Request.URL)
    +
    +	if !ctx.DisableURIPathEscaping {
    +		uri = rest.EscapePath(uri, false)
    +	}
    +
    +	ctx.canonicalString = strings.Join([]string{
    +		ctx.Request.Method,
    +		uri,
    +		ctx.Request.URL.RawQuery,
    +		ctx.canonicalHeaders + "\n",
    +		ctx.signedHeaders,
    +		ctx.bodyDigest,
    +	}, "\n")
    +}
    +
    +func (ctx *signingCtx) buildStringToSign() {
    +	ctx.stringToSign = strings.Join([]string{
    +		authHeaderPrefix,
    +		ctx.formattedTime,
    +		ctx.credentialString,
    +		hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
    +	}, "\n")
    +}
    +
    +func (ctx *signingCtx) buildSignature() {
    +	secret := ctx.credValues.SecretAccessKey
    +	date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
    +	region := makeHmac(date, []byte(ctx.Region))
    +	service := makeHmac(region, []byte(ctx.ServiceName))
    +	credentials := makeHmac(service, []byte("aws4_request"))
    +	signature := makeHmac(credentials, []byte(ctx.stringToSign))
    +	ctx.signature = hex.EncodeToString(signature)
    +}
    +
    +func (ctx *signingCtx) buildBodyDigest() {
    +	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
    +	if hash == "" {
    +		if ctx.isPresign && ctx.ServiceName == "s3" {
    +			hash = "UNSIGNED-PAYLOAD"
    +		} else if ctx.Body == nil {
    +			hash = emptyStringSHA256
    +		} else {
    +			hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
    +		}
    +		if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
    +			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
    +		}
    +	}
    +	ctx.bodyDigest = hash
    +}
    +
    +// isRequestSigned returns if the request is currently signed or presigned
    +func (ctx *signingCtx) isRequestSigned() bool {
    +	if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
    +		return true
    +	}
    +	if ctx.Request.Header.Get("Authorization") != "" {
    +		return true
    +	}
    +
    +	return false
    +}
    +
    +// unsign removes signing flags for both signed and presigned requests.
    +func (ctx *signingCtx) removePresign() {
    +	ctx.Query.Del("X-Amz-Algorithm")
    +	ctx.Query.Del("X-Amz-Signature")
    +	ctx.Query.Del("X-Amz-Security-Token")
    +	ctx.Query.Del("X-Amz-Date")
    +	ctx.Query.Del("X-Amz-Expires")
    +	ctx.Query.Del("X-Amz-Credential")
    +	ctx.Query.Del("X-Amz-SignedHeaders")
    +}
    +
    +func makeHmac(key []byte, data []byte) []byte {
    +	hash := hmac.New(sha256.New, key)
    +	hash.Write(data)
    +	return hash.Sum(nil)
    +}
    +
    +func makeSha256(data []byte) []byte {
    +	hash := sha256.New()
    +	hash.Write(data)
    +	return hash.Sum(nil)
    +}
    +
    +func makeSha256Reader(reader io.ReadSeeker) []byte {
    +	hash := sha256.New()
    +	start, _ := reader.Seek(0, 1)
    +	defer reader.Seek(start, 0)
    +
    +	io.Copy(hash, reader)
    +	return hash.Sum(nil)
    +}
    +
    +const doubleSpaces = "  "
    +
    +var doubleSpaceBytes = []byte(doubleSpaces)
    +
    +func stripExcessSpaces(headerVals []string) []string {
    +	vals := make([]string, len(headerVals))
    +	for i, str := range headerVals {
    +		// Trim leading and trailing spaces
    +		trimmed := strings.TrimSpace(str)
    +
    +		idx := strings.Index(trimmed, doubleSpaces)
    +		var buf []byte
    +		for idx > -1 {
    +			// Multiple adjacent spaces found
    +			if buf == nil {
    +				// first time create the buffer
    +				buf = []byte(trimmed)
    +			}
    +
    +			stripToIdx := -1
    +			for j := idx + 1; j < len(buf); j++ {
    +				if buf[j] != ' ' {
    +					buf = append(buf[:idx+1], buf[j:]...)
    +					stripToIdx = j
    +					break
    +				}
    +			}
    +
    +			if stripToIdx >= 0 {
    +				idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
    +				if idx >= 0 {
    +					idx += stripToIdx
    +				}
    +			} else {
    +				idx = -1
    +			}
    +		}
    +
    +		if buf != nil {
    +			vals[i] = string(buf)
    +		} else {
    +			vals[i] = trimmed
    +		}
    +	}
    +	return vals
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/types.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/types.go
    new file mode 100644
    index 0000000..fa014b4
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/types.go
    @@ -0,0 +1,106 @@
    +package aws
    +
    +import (
    +	"io"
    +	"sync"
    +)
    +
    +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser
    +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
    +	return ReaderSeekerCloser{r}
    +}
    +
    +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
    +// io.Closer interfaces to the underlying object if they are available.
    +type ReaderSeekerCloser struct {
    +	r io.Reader
    +}
    +
    +// Read reads from the reader up to size of p. The number of bytes read, and
    +// error if it occurred will be returned.
    +//
    +// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
    +//
    +// Performs the same functionality as io.Reader Read
    +func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
    +	switch t := r.r.(type) {
    +	case io.Reader:
    +		return t.Read(p)
    +	}
    +	return 0, nil
    +}
    +
    +// Seek sets the offset for the next Read to offset, interpreted according to
    +// whence: 0 means relative to the origin of the file, 1 means relative to the
    +// current offset, and 2 means relative to the end. Seek returns the new offset
    +// and an error, if any.
    +//
    +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
    +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
    +	switch t := r.r.(type) {
    +	case io.Seeker:
    +		return t.Seek(offset, whence)
    +	}
    +	return int64(0), nil
    +}
    +
    +// Close closes the ReaderSeekerCloser.
    +//
    +// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
    +func (r ReaderSeekerCloser) Close() error {
    +	switch t := r.r.(type) {
    +	case io.Closer:
    +		return t.Close()
    +	}
    +	return nil
    +}
    +
    +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface
    +// Can be used with the s3manager.Downloader to download content to a buffer
    +// in memory. Safe to use concurrently.
    +type WriteAtBuffer struct {
    +	buf []byte
    +	m   sync.Mutex
    +
    +	// GrowthCoeff defines the growth rate of the internal buffer. By
    +	// default, the growth rate is 1, where expanding the internal
    +	// buffer will allocate only enough capacity to fit the new expected
    +	// length.
    +	GrowthCoeff float64
    +}
    +
    +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
    +// provided by buf.
    +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
    +	return &WriteAtBuffer{buf: buf}
    +}
    +
    +// WriteAt writes a slice of bytes to a buffer starting at the position provided
    +// The number of bytes written will be returned, or error. Can overwrite previous
    +// written slices if the write ats overlap.
    +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
    +	pLen := len(p)
    +	expLen := pos + int64(pLen)
    +	b.m.Lock()
    +	defer b.m.Unlock()
    +	if int64(len(b.buf)) < expLen {
    +		if int64(cap(b.buf)) < expLen {
    +			if b.GrowthCoeff < 1 {
    +				b.GrowthCoeff = 1
    +			}
    +			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
    +			copy(newBuf, b.buf)
    +			b.buf = newBuf
    +		}
    +		b.buf = b.buf[:expLen]
    +	}
    +	copy(b.buf[pos:], p)
    +	return pLen, nil
    +}
    +
    +// Bytes returns a slice of bytes written to the buffer.
    +func (b *WriteAtBuffer) Bytes() []byte {
    +	b.m.Lock()
    +	defer b.m.Unlock()
    +	return b.buf[:len(b.buf):len(b.buf)]
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/version.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/version.go
    new file mode 100644
    index 0000000..9d5f4a0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/aws/version.go
    @@ -0,0 +1,8 @@
    +// Package aws provides core functionality for making requests to AWS services.
    +package aws
    +
// SDKName is the name of this AWS SDK.
const SDKName = "aws-sdk-go"

// SDKVersion is the release version of the SDK vendored here.
const SDKVersion = "1.5.1"
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
    new file mode 100644
    index 0000000..19d9756
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
    @@ -0,0 +1,70 @@
    +// Package endpoints validates regional endpoints for services.
    +package endpoints
    +
    +//go:generate go run -tags codegen ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
    +//go:generate gofmt -s -w endpoints_map.go
    +
    +import (
    +	"fmt"
    +	"regexp"
    +	"strings"
    +)
    +
    +// NormalizeEndpoint takes and endpoint and service API information to return a
    +// normalized endpoint and signing region.  If the endpoint is not an empty string
    +// the service name and region will be used to look up the service's API endpoint.
    +// If the endpoint is provided the scheme will be added if it is not present.
    +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) {
    +	if endpoint == "" {
    +		return EndpointForRegion(serviceName, region, disableSSL, useDualStack)
    +	}
    +
    +	return AddScheme(endpoint, disableSSL), ""
    +}
    +
    +// EndpointForRegion returns an endpoint and its signing region for a service and region.
    +// if the service and region pair are not found endpoint and signingRegion will be empty.
    +func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) {
    +	dualStackField := ""
    +	if useDualStack {
    +		dualStackField = "/dualstack"
    +	}
    +
    +	derivedKeys := []string{
    +		region + "/" + svcName + dualStackField,
    +		region + "/*" + dualStackField,
    +		"*/" + svcName + dualStackField,
    +		"*/*" + dualStackField,
    +	}
    +
    +	for _, key := range derivedKeys {
    +		if val, ok := endpointsMap.Endpoints[key]; ok {
    +			ep := val.Endpoint
    +			ep = strings.Replace(ep, "{region}", region, -1)
    +			ep = strings.Replace(ep, "{service}", svcName, -1)
    +
    +			endpoint = ep
    +			signingRegion = val.SigningRegion
    +			break
    +		}
    +	}
    +
    +	return AddScheme(endpoint, disableSSL), signingRegion
    +}
    +
    +// Regular expression to determine if the endpoint string is prefixed with a scheme.
    +var schemeRE = regexp.MustCompile("^([^:]+)://")
    +
    +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no
    +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
    +func AddScheme(endpoint string, disableSSL bool) string {
    +	if endpoint != "" && !schemeRE.MatchString(endpoint) {
    +		scheme := "https"
    +		if disableSSL {
    +			scheme = "http"
    +		}
    +		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
    +	}
    +
    +	return endpoint
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
    new file mode 100644
    index 0000000..5594f2e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
    @@ -0,0 +1,82 @@
    +{
    +  "version": 2,
    +  "endpoints": {
    +    "*/*": {
    +      "endpoint": "{service}.{region}.amazonaws.com"
    +    },
    +    "cn-north-1/*": {
    +      "endpoint": "{service}.{region}.amazonaws.com.cn",
    +      "signatureVersion": "v4"
    +    },
    +    "cn-north-1/ec2metadata": {
    +      "endpoint": "http://169.254.169.254/latest"
    +    },
    +    "us-gov-west-1/iam": {
    +      "endpoint": "iam.us-gov.amazonaws.com"
    +    },
    +    "us-gov-west-1/sts": {
    +      "endpoint": "sts.us-gov-west-1.amazonaws.com"
    +    },
    +    "us-gov-west-1/s3": {
    +      "endpoint": "s3-{region}.amazonaws.com"
    +    },
    +    "us-gov-west-1/ec2metadata": {
    +      "endpoint": "http://169.254.169.254/latest"
    +    },
    +    "*/budgets": {
    +      "endpoint": "budgets.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/cloudfront": {
    +      "endpoint": "cloudfront.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/cloudsearchdomain": {
    +      "endpoint": "",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/data.iot": {
    +      "endpoint": "",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/ec2metadata": {
    +      "endpoint": "http://169.254.169.254/latest"
    +    },
    +    "*/iam": {
    +      "endpoint": "iam.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/importexport": {
    +      "endpoint": "importexport.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/route53": {
    +      "endpoint": "route53.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/sts": {
    +      "endpoint": "sts.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/waf": {
    +      "endpoint": "waf.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "us-east-1/sdb": {
    +      "endpoint": "sdb.amazonaws.com",
    +      "signingRegion": "us-east-1"
    +    },
    +    "*/s3": {
    +      "endpoint": "s3-{region}.amazonaws.com"
    +    },
    +    "*/s3/dualstack": {
    +      "endpoint": "s3.dualstack.{region}.amazonaws.com"
    +    },
    +    "us-east-1/s3": {
    +      "endpoint": "s3.amazonaws.com"
    +    },
    +    "eu-central-1/s3": {
    +      "endpoint": "{service}.{region}.amazonaws.com"
    +    }
    +  }
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
    new file mode 100644
    index 0000000..e79e678
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
    @@ -0,0 +1,95 @@
    +package endpoints
    +
    +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
    +
    +type endpointStruct struct {
    +	Version   int
    +	Endpoints map[string]endpointEntry
    +}
    +
    +type endpointEntry struct {
    +	Endpoint      string
    +	SigningRegion string
    +}
    +
    +var endpointsMap = endpointStruct{
    +	Version: 2,
    +	Endpoints: map[string]endpointEntry{
    +		"*/*": {
    +			Endpoint: "{service}.{region}.amazonaws.com",
    +		},
    +		"*/budgets": {
    +			Endpoint:      "budgets.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/cloudfront": {
    +			Endpoint:      "cloudfront.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/cloudsearchdomain": {
    +			Endpoint:      "",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/data.iot": {
    +			Endpoint:      "",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/ec2metadata": {
    +			Endpoint: "http://169.254.169.254/latest",
    +		},
    +		"*/iam": {
    +			Endpoint:      "iam.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/importexport": {
    +			Endpoint:      "importexport.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/route53": {
    +			Endpoint:      "route53.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/s3": {
    +			Endpoint: "s3-{region}.amazonaws.com",
    +		},
    +		"*/s3/dualstack": {
    +			Endpoint: "s3.dualstack.{region}.amazonaws.com",
    +		},
    +		"*/sts": {
    +			Endpoint:      "sts.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"*/waf": {
    +			Endpoint:      "waf.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"cn-north-1/*": {
    +			Endpoint: "{service}.{region}.amazonaws.com.cn",
    +		},
    +		"cn-north-1/ec2metadata": {
    +			Endpoint: "http://169.254.169.254/latest",
    +		},
    +		"eu-central-1/s3": {
    +			Endpoint: "{service}.{region}.amazonaws.com",
    +		},
    +		"us-east-1/s3": {
    +			Endpoint: "s3.amazonaws.com",
    +		},
    +		"us-east-1/sdb": {
    +			Endpoint:      "sdb.amazonaws.com",
    +			SigningRegion: "us-east-1",
    +		},
    +		"us-gov-west-1/ec2metadata": {
    +			Endpoint: "http://169.254.169.254/latest",
    +		},
    +		"us-gov-west-1/iam": {
    +			Endpoint: "iam.us-gov.amazonaws.com",
    +		},
    +		"us-gov-west-1/s3": {
    +			Endpoint: "s3-{region}.amazonaws.com",
    +		},
    +		"us-gov-west-1/sts": {
    +			Endpoint: "sts.us-gov-west-1.amazonaws.com",
    +		},
    +	},
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go
    new file mode 100644
    index 0000000..eedc5bd
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go
    @@ -0,0 +1,35 @@
    +// Package ec2query provides serialization of AWS EC2 requests and responses.
    +package ec2query
    +
    +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go
    +
    +import (
    +	"net/url"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
    +)
    +
    +// BuildHandler is a named request handler for building ec2query protocol requests
    +var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build}
    +
    +// Build builds a request for the EC2 protocol.
    +func Build(r *request.Request) {
    +	body := url.Values{
    +		"Action":  {r.Operation.Name},
    +		"Version": {r.ClientInfo.APIVersion},
    +	}
    +	if err := queryutil.Parse(body, r.Params, true); err != nil {
    +		r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
    +	}
    +
    +	if r.ExpireTime == 0 {
    +		r.HTTPRequest.Method = "POST"
    +		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
    +		r.SetBufferBody([]byte(body.Encode()))
    +	} else { // This is a pre-signed request
    +		r.HTTPRequest.Method = "GET"
    +		r.HTTPRequest.URL.RawQuery = body.Encode()
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
    new file mode 100644
    index 0000000..095e97c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
    @@ -0,0 +1,63 @@
    +package ec2query
    +
    +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go
    +
    +import (
    +	"encoding/xml"
    +	"io"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
    +)
    +
// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta}

// UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError}

// Unmarshal unmarshals a response body for the EC2 protocol, decoding the
// XML body directly into r.Data. The response body is always closed.
func Unmarshal(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		// Empty wrapper name: unlike the generic query protocol, no
		// "<Operation>Result" element is expected here.
		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
		if err != nil {
			r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err)
			return
		}
	}
}

// UnmarshalMeta unmarshals response headers for the EC2 protocol.
// Currently a no-op.
func UnmarshalMeta(r *request.Request) {
	// TODO implement unmarshaling of request IDs
}
    +
    +type xmlErrorResponse struct {
    +	XMLName   xml.Name `xml:"Response"`
    +	Code      string   `xml:"Errors>Error>Code"`
    +	Message   string   `xml:"Errors>Error>Message"`
    +	RequestID string   `xml:"RequestID"`
    +}
    +
    +// UnmarshalError unmarshals a response error for the EC2 protocol.
    +func UnmarshalError(r *request.Request) {
    +	defer r.HTTPResponse.Body.Close()
    +
    +	resp := &xmlErrorResponse{}
    +	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
    +	if err != nil && err != io.EOF {
    +		r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err)
    +	} else {
    +		r.Error = awserr.NewRequestFailure(
    +			awserr.New(resp.Code, resp.Message, nil),
    +			r.HTTPResponse.StatusCode,
    +			resp.RequestID,
    +		)
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
    new file mode 100644
    index 0000000..53831df
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
    @@ -0,0 +1,75 @@
    +package protocol
    +
    +import (
    +	"crypto/rand"
    +	"fmt"
    +	"reflect"
    +)
    +
    +// RandReader is the random reader the protocol package will use to read
    +// random bytes from. This is exported for testing, and should not be used.
    +var RandReader = rand.Reader
    +
    +const idempotencyTokenFillTag = `idempotencyToken`
    +
    +// CanSetIdempotencyToken returns true if the struct field should be
    +// automatically populated with a Idempotency token.
    +//
    +// Only *string and string type fields that are tagged with idempotencyToken
    +// which are not already set can be auto filled.
    +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
    +	switch u := v.Interface().(type) {
    +	// To auto fill an Idempotency token the field must be a string,
    +	// tagged for auto fill, and have a zero value.
    +	case *string:
    +		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
    +	case string:
    +		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
    +	}
    +
    +	return false
    +}
    +
    +// GetIdempotencyToken returns a randomly generated idempotency token.
    +func GetIdempotencyToken() string {
    +	b := make([]byte, 16)
    +	RandReader.Read(b)
    +
    +	return UUIDVersion4(b)
    +}
    +
    +// SetIdempotencyToken will set the value provided with a Idempotency Token.
    +// Given that the value can be set. Will panic if value is not setable.
    +func SetIdempotencyToken(v reflect.Value) {
    +	if v.Kind() == reflect.Ptr {
    +		if v.IsNil() && v.CanSet() {
    +			v.Set(reflect.New(v.Type().Elem()))
    +		}
    +		v = v.Elem()
    +	}
    +	v = reflect.Indirect(v)
    +
    +	if !v.CanSet() {
    +		panic(fmt.Sprintf("unable to set idempotnecy token %v", v))
    +	}
    +
    +	b := make([]byte, 16)
    +	_, err := rand.Read(b)
    +	if err != nil {
    +		// TODO handle error
    +		return
    +	}
    +
    +	v.Set(reflect.ValueOf(UUIDVersion4(b)))
    +}
    +
    +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
    +func UUIDVersion4(u []byte) string {
    +	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
    +	// 13th character is "4"
    +	u[6] = (u[6] | 0x40) & 0x4F
    +	// 17th character is "8", "9", "a", or "b"
    +	u[8] = (u[8] | 0x80) & 0xBF
    +
    +	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
    new file mode 100644
    index 0000000..18169f0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
    @@ -0,0 +1,36 @@
    +// Package query provides serialization of AWS query requests, and responses.
    +package query
    +
    +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
    +
    +import (
    +	"net/url"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
    +)
    +
    +// BuildHandler is a named request handler for building query protocol requests
    +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
    +
    +// Build builds a request for an AWS Query service.
    +func Build(r *request.Request) {
    +	body := url.Values{
    +		"Action":  {r.Operation.Name},
    +		"Version": {r.ClientInfo.APIVersion},
    +	}
    +	if err := queryutil.Parse(body, r.Params, false); err != nil {
    +		r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
    +		return
    +	}
    +
    +	if r.ExpireTime == 0 {
    +		r.HTTPRequest.Method = "POST"
    +		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
    +		r.SetBufferBody([]byte(body.Encode()))
    +	} else { // This is a pre-signed request
    +		r.HTTPRequest.Method = "GET"
    +		r.HTTPRequest.URL.RawQuery = body.Encode()
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
    new file mode 100644
    index 0000000..60ea0bd
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
    @@ -0,0 +1,230 @@
    +package queryutil
    +
    +import (
    +	"encoding/base64"
    +	"fmt"
    +	"net/url"
    +	"reflect"
    +	"sort"
    +	"strconv"
    +	"strings"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/private/protocol"
    +)
    +
// Parse encodes the value i into body as AWS Query protocol parameters.
// The isEC2 flag selects the EC2 Query sub-protocol's flattening and naming
// rules. The top-level value is serialized with an empty key prefix and no
// struct tag.
func Parse(body url.Values, i interface{}, isEC2 bool) error {
	q := queryParser{isEC2: isEC2}
	return q.parseValue(body, reflect.ValueOf(i), "", "")
}
    +
    +func elemOf(value reflect.Value) reflect.Value {
    +	for value.Kind() == reflect.Ptr {
    +		value = value.Elem()
    +	}
    +	return value
    +}
    +
// queryParser serializes Go values into AWS Query string parameters. isEC2
// selects the EC2 sub-protocol's naming and flattening rules.
type queryParser struct {
	isEC2 bool
}

// parseValue dispatches value to the structure/list/map/scalar serializer,
// writing encoded parameters into v under the key prefix. The "type" struct
// tag, when present, overrides the kind-based detection.
func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
	value = elemOf(value)

	// no need to handle zero values
	if !value.IsValid() {
		return nil
	}

	t := tag.Get("type")
	if t == "" {
		// Untagged: infer the protocol type from the Go kind; anything
		// else falls through to the scalar case below.
		switch value.Kind() {
		case reflect.Struct:
			t = "structure"
		case reflect.Slice:
			t = "list"
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		return q.parseStruct(v, value, prefix)
	case "list":
		return q.parseList(v, value, prefix, tag)
	case "map":
		return q.parseMap(v, value, prefix, tag)
	default:
		return q.parseScalar(v, value, prefix, tag)
	}
}
    +
// parseStruct serializes every exported field of a struct into v. The wire
// name is resolved in priority order: the EC2-only "queryName" tag, the
// "locationNameList" tag (for flattened lists), the "locationName" tag
// (first letter upper-cased for EC2), and finally the Go field name. Unset
// fields tagged as idempotency tokens are auto-populated.
func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
	if !value.IsValid() {
		return nil
	}

	t := value.Type()
	for i := 0; i < value.NumField(); i++ {
		elemValue := elemOf(value.Field(i))
		field := t.Field(i)

		if field.PkgPath != "" {
			continue // ignore unexported fields
		}

		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
			token := protocol.GetIdempotencyToken()
			elemValue = reflect.ValueOf(token)
		}

		var name string
		if q.isEC2 {
			name = field.Tag.Get("queryName")
		}
		if name == "" {
			if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
				name = field.Tag.Get("locationNameList")
			} else if locName := field.Tag.Get("locationName"); locName != "" {
				name = locName
			}
			if name != "" && q.isEC2 {
				// EC2 wire names are the capitalized locationName.
				name = strings.ToUpper(name[0:1]) + name[1:]
			}
		}
		if name == "" {
			name = field.Name
		}

		if prefix != "" {
			name = prefix + "." + name
		}

		if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
			return err
		}
	}
	return nil
}
    +
    +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
    +	// If it's empty, generate an empty value
    +	if !value.IsNil() && value.Len() == 0 {
    +		v.Set(prefix, "")
    +		return nil
    +	}
    +
    +	// check for unflattened list member
    +	if !q.isEC2 && tag.Get("flattened") == "" {
    +		prefix += ".member"
    +	}
    +
    +	for i := 0; i < value.Len(); i++ {
    +		slicePrefix := prefix
    +		if slicePrefix == "" {
    +			slicePrefix = strconv.Itoa(i + 1)
    +		} else {
    +			slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
    +		}
    +		if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
// parseMap serializes map entries as 1-based indexed key/value parameter
// pairs. A non-nil empty map emits a single bare key. Maps gain an ".entry"
// segment unless flattened or using EC2 rules; keys are sorted so output is
// deterministic.
func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
	// If it's empty, generate an empty value
	if !value.IsNil() && value.Len() == 0 {
		v.Set(prefix, "")
		return nil
	}

	// check for unflattened list member
	if !q.isEC2 && tag.Get("flattened") == "" {
		prefix += ".entry"
	}

	// sort keys for improved serialization consistency.
	// this is not strictly necessary for protocol support.
	mapKeyValues := value.MapKeys()
	mapKeys := map[string]reflect.Value{}
	mapKeyNames := make([]string, len(mapKeyValues))
	for i, mapKey := range mapKeyValues {
		name := mapKey.String()
		mapKeys[name] = mapKey
		mapKeyNames[i] = name
	}
	sort.Strings(mapKeyNames)

	for i, mapKeyName := range mapKeyNames {
		mapKey := mapKeys[mapKeyName]
		mapValue := value.MapIndex(mapKey)

		// Entry sub-names default to "key"/"value" but may be overridden
		// by the locationNameKey/locationNameValue tags.
		kname := tag.Get("locationNameKey")
		if kname == "" {
			kname = "key"
		}
		vname := tag.Get("locationNameValue")
		if vname == "" {
			vname = "value"
		}

		// serialize key
		var keyName string
		if prefix == "" {
			keyName = strconv.Itoa(i+1) + "." + kname
		} else {
			keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
		}

		if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
			return err
		}

		// serialize value
		var valueName string
		if prefix == "" {
			valueName = strconv.Itoa(i+1) + "." + vname
		} else {
			valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
		}

		if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
			return err
		}
	}

	return nil
}
    +
// parseScalar encodes a single leaf value into v under name. Strings are
// written verbatim, non-nil []byte is base64-encoded, booleans and numbers
// use their shortest exact decimal form, and time.Time is rendered as
// ISO 8601 UTC. Any other type is a serialization error.
func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
	switch value := r.Interface().(type) {
	case string:
		v.Set(name, value)
	case []byte:
		// nil []byte is simply omitted rather than encoded as "".
		if !r.IsNil() {
			v.Set(name, base64.StdEncoding.EncodeToString(value))
		}
	case bool:
		v.Set(name, strconv.FormatBool(value))
	case int64:
		v.Set(name, strconv.FormatInt(value, 10))
	case int:
		v.Set(name, strconv.Itoa(value))
	case float64:
		v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
	case float32:
		v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
	case time.Time:
		const ISO8601UTC = "2006-01-02T15:04:05Z"
		v.Set(name, value.UTC().Format(ISO8601UTC))
	default:
		return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
	}
	return nil
}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
    new file mode 100644
    index 0000000..e0f4d5a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
    @@ -0,0 +1,35 @@
    +package query
    +
    +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
    +
    +import (
    +	"encoding/xml"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
    +)
    +
// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}

// Unmarshal unmarshals a response for an AWS Query service, decoding the
// XML body into r.Data from inside the "<Operation>Result" wrapper element.
// The response body is always closed.
func Unmarshal(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
		if err != nil {
			r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
			return
		}
	}
}

// UnmarshalMeta unmarshals header response values for an AWS Query service:
// currently just the request ID from the X-Amzn-Requestid header.
func UnmarshalMeta(r *request.Request) {
	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
    new file mode 100644
    index 0000000..f214296
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
    @@ -0,0 +1,66 @@
    +package query
    +
    +import (
    +	"encoding/xml"
    +	"io/ioutil"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +type xmlErrorResponse struct {
    +	XMLName   xml.Name `xml:"ErrorResponse"`
    +	Code      string   `xml:"Error>Code"`
    +	Message   string   `xml:"Error>Message"`
    +	RequestID string   `xml:"RequestId"`
    +}
    +
    +type xmlServiceUnavailableResponse struct {
    +	XMLName xml.Name `xml:"ServiceUnavailableException"`
    +}
    +
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
    +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
    +
    +// UnmarshalError unmarshals an error response for an AWS Query service.
    +func UnmarshalError(r *request.Request) {
    +	defer r.HTTPResponse.Body.Close()
    +
    +	bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
    +	if err != nil {
    +		r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
    +		return
    +	}
    +
    +	// First check for specific error
    +	resp := xmlErrorResponse{}
    +	decodeErr := xml.Unmarshal(bodyBytes, &resp)
    +	if decodeErr == nil {
    +		reqID := resp.RequestID
    +		if reqID == "" {
    +			reqID = r.RequestID
    +		}
    +		r.Error = awserr.NewRequestFailure(
    +			awserr.New(resp.Code, resp.Message, nil),
    +			r.HTTPResponse.StatusCode,
    +			reqID,
    +		)
    +		return
    +	}
    +
    +	// Check for unhandled error
    +	servUnavailResp := xmlServiceUnavailableResponse{}
    +	unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
    +	if unavailErr == nil {
    +		r.Error = awserr.NewRequestFailure(
    +			awserr.New("ServiceUnavailableException", "service is unavailable", nil),
    +			r.HTTPResponse.StatusCode,
    +			r.RequestID,
    +		)
    +		return
    +	}
    +
    +	// Failed to retrieve any error message from the response body
    +	r.Error = awserr.New("SerializationError",
    +		"failed to decode query XML error response", decodeErr)
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
    new file mode 100644
    index 0000000..5f41251
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
    @@ -0,0 +1,256 @@
    +// Package rest provides RESTful serialization of AWS requests and responses.
    +package rest
    +
    +import (
    +	"bytes"
    +	"encoding/base64"
    +	"fmt"
    +	"io"
    +	"net/http"
    +	"net/url"
    +	"path"
    +	"reflect"
    +	"strconv"
    +	"strings"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// RFC822 returns an RFC822 formatted timestamp for AWS protocols
    +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
    +
    +// Whether the byte value can be sent without escaping in AWS URLs
    +var noEscape [256]bool
    +
    +var errValueNotSet = fmt.Errorf("value not set")
    +
    +func init() {
    +	for i := 0; i < len(noEscape); i++ {
    +		// AWS expects every character except these to be escaped
    +		noEscape[i] = (i >= 'A' && i <= 'Z') ||
    +			(i >= 'a' && i <= 'z') ||
    +			(i >= '0' && i <= '9') ||
    +			i == '-' ||
    +			i == '.' ||
    +			i == '_' ||
    +			i == '~'
    +	}
    +}
    +
    +// BuildHandler is a named request handler for building rest protocol requests
    +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
    +
    +// Build builds the REST component of a service request.
    +func Build(r *request.Request) {
    +	if r.ParamsFilled() {
    +		v := reflect.ValueOf(r.Params).Elem()
    +		buildLocationElements(r, v)
    +		buildBody(r, v)
    +	}
    +}
    +
    +func buildLocationElements(r *request.Request, v reflect.Value) {
    +	query := r.HTTPRequest.URL.Query()
    +
    +	for i := 0; i < v.NumField(); i++ {
    +		m := v.Field(i)
    +		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
    +			continue
    +		}
    +
    +		if m.IsValid() {
    +			field := v.Type().Field(i)
    +			name := field.Tag.Get("locationName")
    +			if name == "" {
    +				name = field.Name
    +			}
    +			if m.Kind() == reflect.Ptr {
    +				m = m.Elem()
    +			}
    +			if !m.IsValid() {
    +				continue
    +			}
    +
    +			var err error
    +			switch field.Tag.Get("location") {
    +			case "headers": // header maps
    +				err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName"))
    +			case "header":
    +				err = buildHeader(&r.HTTPRequest.Header, m, name)
    +			case "uri":
    +				err = buildURI(r.HTTPRequest.URL, m, name)
    +			case "querystring":
    +				err = buildQueryString(query, m, name)
    +			}
    +			r.Error = err
    +		}
    +		if r.Error != nil {
    +			return
    +		}
    +	}
    +
    +	r.HTTPRequest.URL.RawQuery = query.Encode()
    +	updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
    +}
    +
    +func buildBody(r *request.Request, v reflect.Value) {
    +	if field, ok := v.Type().FieldByName("_"); ok {
    +		if payloadName := field.Tag.Get("payload"); payloadName != "" {
    +			pfield, _ := v.Type().FieldByName(payloadName)
    +			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
    +				payload := reflect.Indirect(v.FieldByName(payloadName))
    +				if payload.IsValid() && payload.Interface() != nil {
    +					switch reader := payload.Interface().(type) {
    +					case io.ReadSeeker:
    +						r.SetReaderBody(reader)
    +					case []byte:
    +						r.SetBufferBody(reader)
    +					case string:
    +						r.SetStringBody(reader)
    +					default:
    +						r.Error = awserr.New("SerializationError",
    +							"failed to encode REST request",
    +							fmt.Errorf("unknown payload type %s", payload.Type()))
    +					}
    +				}
    +			}
    +		}
    +	}
    +}
    +
    +func buildHeader(header *http.Header, v reflect.Value, name string) error {
    +	str, err := convertType(v)
    +	if err == errValueNotSet {
    +		return nil
    +	} else if err != nil {
    +		return awserr.New("SerializationError", "failed to encode REST request", err)
    +	}
    +
    +	header.Add(name, str)
    +
    +	return nil
    +}
    +
    +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error {
    +	for _, key := range v.MapKeys() {
    +		str, err := convertType(v.MapIndex(key))
    +		if err == errValueNotSet {
    +			continue
    +		} else if err != nil {
    +			return awserr.New("SerializationError", "failed to encode REST request", err)
    +
    +		}
    +
    +		header.Add(prefix+key.String(), str)
    +	}
    +	return nil
    +}
    +
    +func buildURI(u *url.URL, v reflect.Value, name string) error {
    +	value, err := convertType(v)
    +	if err == errValueNotSet {
    +		return nil
    +	} else if err != nil {
    +		return awserr.New("SerializationError", "failed to encode REST request", err)
    +	}
    +
    +	uri := u.Path
    +	uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1)
    +	uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1)
    +	u.Path = uri
    +
    +	return nil
    +}
    +
    +func buildQueryString(query url.Values, v reflect.Value, name string) error {
    +	switch value := v.Interface().(type) {
    +	case []*string:
    +		for _, item := range value {
    +			query.Add(name, *item)
    +		}
    +	case map[string]*string:
    +		for key, item := range value {
    +			query.Add(key, *item)
    +		}
    +	case map[string][]*string:
    +		for key, items := range value {
    +			for _, item := range items {
    +				query.Add(key, *item)
    +			}
    +		}
    +	default:
    +		str, err := convertType(v)
    +		if err == errValueNotSet {
    +			return nil
    +		} else if err != nil {
    +			return awserr.New("SerializationError", "failed to encode REST request", err)
    +		}
    +		query.Set(name, str)
    +	}
    +
    +	return nil
    +}
    +
    +func updatePath(url *url.URL, urlPath string) {
    +	scheme, query := url.Scheme, url.RawQuery
    +
    +	hasSlash := strings.HasSuffix(urlPath, "/")
    +
    +	// clean up path
    +	urlPath = path.Clean(urlPath)
    +	if hasSlash && !strings.HasSuffix(urlPath, "/") {
    +		urlPath += "/"
    +	}
    +
    +	// get formatted URL minus scheme so we can build this into Opaque
    +	url.Scheme, url.Path, url.RawQuery = "", "", ""
    +	s := url.String()
    +	url.Scheme = scheme
    +	url.RawQuery = query
    +
    +	// build opaque URI
    +	url.Opaque = s + urlPath
    +}
    +
    +// EscapePath escapes part of a URL path in Amazon style
    +func EscapePath(path string, encodeSep bool) string {
    +	var buf bytes.Buffer
    +	for i := 0; i < len(path); i++ {
    +		c := path[i]
    +		if noEscape[c] || (c == '/' && !encodeSep) {
    +			buf.WriteByte(c)
    +		} else {
    +			fmt.Fprintf(&buf, "%%%02X", c)
    +		}
    +	}
    +	return buf.String()
    +}
    +
    +func convertType(v reflect.Value) (string, error) {
    +	v = reflect.Indirect(v)
    +	if !v.IsValid() {
    +		return "", errValueNotSet
    +	}
    +
    +	var str string
    +	switch value := v.Interface().(type) {
    +	case string:
    +		str = value
    +	case []byte:
    +		str = base64.StdEncoding.EncodeToString(value)
    +	case bool:
    +		str = strconv.FormatBool(value)
    +	case int64:
    +		str = strconv.FormatInt(value, 10)
    +	case float64:
    +		str = strconv.FormatFloat(value, 'f', -1, 64)
    +	case time.Time:
    +		str = value.UTC().Format(RFC822)
    +	default:
    +		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
    +		return "", err
    +	}
    +	return str, nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
    new file mode 100644
    index 0000000..4366de2
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
    @@ -0,0 +1,45 @@
    +package rest
    +
    +import "reflect"
    +
    +// PayloadMember returns the payload field member of i if there is one, or nil.
    +func PayloadMember(i interface{}) interface{} {
    +	if i == nil {
    +		return nil
    +	}
    +
    +	v := reflect.ValueOf(i).Elem()
    +	if !v.IsValid() {
    +		return nil
    +	}
    +	if field, ok := v.Type().FieldByName("_"); ok {
    +		if payloadName := field.Tag.Get("payload"); payloadName != "" {
    +			field, _ := v.Type().FieldByName(payloadName)
    +			if field.Tag.Get("type") != "structure" {
    +				return nil
    +			}
    +
    +			payload := v.FieldByName(payloadName)
    +			if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
    +				return payload.Interface()
    +			}
    +		}
    +	}
    +	return nil
    +}
    +
    +// PayloadType returns the type of a payload field member of i if there is one, or "".
    +func PayloadType(i interface{}) string {
    +	v := reflect.Indirect(reflect.ValueOf(i))
    +	if !v.IsValid() {
    +		return ""
    +	}
    +	if field, ok := v.Type().FieldByName("_"); ok {
    +		if payloadName := field.Tag.Get("payload"); payloadName != "" {
    +			if member, ok := v.Type().FieldByName(payloadName); ok {
    +				return member.Tag.Get("type")
    +			}
    +		}
    +	}
    +	return ""
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
    new file mode 100644
    index 0000000..2cba1d9
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
    @@ -0,0 +1,198 @@
    +package rest
    +
    +import (
    +	"encoding/base64"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"reflect"
    +	"strconv"
    +	"strings"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
    +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
    +
    +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
    +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
    +
    +// Unmarshal unmarshals the REST component of a response in a REST service.
    +func Unmarshal(r *request.Request) {
    +	if r.DataFilled() {
    +		v := reflect.Indirect(reflect.ValueOf(r.Data))
    +		unmarshalBody(r, v)
    +	}
    +}
    +
    +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
    +func UnmarshalMeta(r *request.Request) {
    +	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
    +	if r.RequestID == "" {
    +		// Alternative version of request id in the header
    +		r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
    +	}
    +	if r.DataFilled() {
    +		v := reflect.Indirect(reflect.ValueOf(r.Data))
    +		unmarshalLocationElements(r, v)
    +	}
    +}
    +
    +func unmarshalBody(r *request.Request, v reflect.Value) {
    +	if field, ok := v.Type().FieldByName("_"); ok {
    +		if payloadName := field.Tag.Get("payload"); payloadName != "" {
    +			pfield, _ := v.Type().FieldByName(payloadName)
    +			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
    +				payload := v.FieldByName(payloadName)
    +				if payload.IsValid() {
    +					switch payload.Interface().(type) {
    +					case []byte:
    +						defer r.HTTPResponse.Body.Close()
    +						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
    +						if err != nil {
    +							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
    +						} else {
    +							payload.Set(reflect.ValueOf(b))
    +						}
    +					case *string:
    +						defer r.HTTPResponse.Body.Close()
    +						b, err := ioutil.ReadAll(r.HTTPResponse.Body)
    +						if err != nil {
    +							r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
    +						} else {
    +							str := string(b)
    +							payload.Set(reflect.ValueOf(&str))
    +						}
    +					default:
    +						switch payload.Type().String() {
    +						case "io.ReadSeeker":
    +							payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
    +						case "aws.ReadSeekCloser", "io.ReadCloser":
    +							payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
    +						default:
    +							io.Copy(ioutil.Discard, r.HTTPResponse.Body)
    +							defer r.HTTPResponse.Body.Close()
    +							r.Error = awserr.New("SerializationError",
    +								"failed to decode REST response",
    +								fmt.Errorf("unknown payload type %s", payload.Type()))
    +						}
    +					}
    +				}
    +			}
    +		}
    +	}
    +}
    +
    +func unmarshalLocationElements(r *request.Request, v reflect.Value) {
    +	for i := 0; i < v.NumField(); i++ {
    +		m, field := v.Field(i), v.Type().Field(i)
    +		if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
    +			continue
    +		}
    +
    +		if m.IsValid() {
    +			name := field.Tag.Get("locationName")
    +			if name == "" {
    +				name = field.Name
    +			}
    +
    +			switch field.Tag.Get("location") {
    +			case "statusCode":
    +				unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
    +			case "header":
    +				err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
    +				if err != nil {
    +					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
    +					break
    +				}
    +			case "headers":
    +				prefix := field.Tag.Get("locationName")
    +				err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
    +				if err != nil {
    +					r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
    +					break
    +				}
    +			}
    +		}
    +		if r.Error != nil {
    +			return
    +		}
    +	}
    +}
    +
    +func unmarshalStatusCode(v reflect.Value, statusCode int) {
    +	if !v.IsValid() {
    +		return
    +	}
    +
    +	switch v.Interface().(type) {
    +	case *int64:
    +		s := int64(statusCode)
    +		v.Set(reflect.ValueOf(&s))
    +	}
    +}
    +
    +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
    +	switch r.Interface().(type) {
    +	case map[string]*string: // we only support string map value types
    +		out := map[string]*string{}
    +		for k, v := range headers {
    +			k = http.CanonicalHeaderKey(k)
    +			if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
    +				out[k[len(prefix):]] = &v[0]
    +			}
    +		}
    +		r.Set(reflect.ValueOf(out))
    +	}
    +	return nil
    +}
    +
    +func unmarshalHeader(v reflect.Value, header string) error {
    +	if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
    +		return nil
    +	}
    +
    +	switch v.Interface().(type) {
    +	case *string:
    +		v.Set(reflect.ValueOf(&header))
    +	case []byte:
    +		b, err := base64.StdEncoding.DecodeString(header)
    +		if err != nil {
    +			return err
    +		}
    +		v.Set(reflect.ValueOf(&b))
    +	case *bool:
    +		b, err := strconv.ParseBool(header)
    +		if err != nil {
    +			return err
    +		}
    +		v.Set(reflect.ValueOf(&b))
    +	case *int64:
    +		i, err := strconv.ParseInt(header, 10, 64)
    +		if err != nil {
    +			return err
    +		}
    +		v.Set(reflect.ValueOf(&i))
    +	case *float64:
    +		f, err := strconv.ParseFloat(header, 64)
    +		if err != nil {
    +			return err
    +		}
    +		v.Set(reflect.ValueOf(&f))
    +	case *time.Time:
    +		t, err := time.Parse(RFC822, header)
    +		if err != nil {
    +			return err
    +		}
    +		v.Set(reflect.ValueOf(&t))
    +	default:
    +		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
    +		return err
    +	}
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
    new file mode 100644
    index 0000000..da1a681
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
    @@ -0,0 +1,21 @@
    +package protocol
    +
    +import (
    +	"io"
    +	"io/ioutil"
    +
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
    +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
    +
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
    +func UnmarshalDiscardBody(r *request.Request) {
    +	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
    +		return
    +	}
    +
    +	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
    +	r.HTTPResponse.Body.Close()
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
    new file mode 100644
    index 0000000..221029b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
    @@ -0,0 +1,293 @@
    +// Package xmlutil provides XML serialization of AWS requests and responses.
    +package xmlutil
    +
    +import (
    +	"encoding/base64"
    +	"encoding/xml"
    +	"fmt"
    +	"reflect"
    +	"sort"
    +	"strconv"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/private/protocol"
    +)
    +
    +// BuildXML will serialize params into an xml.Encoder.
    +// Error will be returned if the serialization of any of the params or nested values fails.
    +func BuildXML(params interface{}, e *xml.Encoder) error {
    +	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
    +	root := NewXMLElement(xml.Name{})
    +	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
    +		return err
    +	}
    +	for _, c := range root.Children {
    +		for _, v := range c {
    +			return StructToXML(e, v, false)
    +		}
    +	}
    +	return nil
    +}
    +
    +// Returns the reflection element of a value, if it is a pointer.
    +func elemOf(value reflect.Value) reflect.Value {
    +	for value.Kind() == reflect.Ptr {
    +		value = value.Elem()
    +	}
    +	return value
    +}
    +
+// An xmlBuilder serializes values from Go code to XML
    +type xmlBuilder struct {
    +	encoder    *xml.Encoder
    +	namespaces map[string]string
    +}
    +
    +// buildValue generic XMLNode builder for any type. Will build value for their specific type
    +// struct, list, map, scalar.
    +//
    +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
    +// type is not provided reflect will be used to determine the value's type.
    +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
    +	value = elemOf(value)
    +	if !value.IsValid() { // no need to handle zero values
    +		return nil
    +	} else if tag.Get("location") != "" { // don't handle non-body location values
    +		return nil
    +	}
    +
    +	t := tag.Get("type")
    +	if t == "" {
    +		switch value.Kind() {
    +		case reflect.Struct:
    +			t = "structure"
    +		case reflect.Slice:
    +			t = "list"
    +		case reflect.Map:
    +			t = "map"
    +		}
    +	}
    +
    +	switch t {
    +	case "structure":
    +		if field, ok := value.Type().FieldByName("_"); ok {
    +			tag = tag + reflect.StructTag(" ") + field.Tag
    +		}
    +		return b.buildStruct(value, current, tag)
    +	case "list":
    +		return b.buildList(value, current, tag)
    +	case "map":
    +		return b.buildMap(value, current, tag)
    +	default:
    +		return b.buildScalar(value, current, tag)
    +	}
    +}
    +
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
    +// types are converted to XMLNodes also.
    +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
    +	if !value.IsValid() {
    +		return nil
    +	}
    +
    +	fieldAdded := false
    +
    +	// unwrap payloads
    +	if payload := tag.Get("payload"); payload != "" {
    +		field, _ := value.Type().FieldByName(payload)
    +		tag = field.Tag
    +		value = elemOf(value.FieldByName(payload))
    +
    +		if !value.IsValid() {
    +			return nil
    +		}
    +	}
    +
    +	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
    +
    +	// there is an xmlNamespace associated with this struct
    +	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
    +		ns := xml.Attr{
    +			Name:  xml.Name{Local: "xmlns"},
    +			Value: uri,
    +		}
    +		if prefix != "" {
    +			b.namespaces[prefix] = uri // register the namespace
    +			ns.Name.Local = "xmlns:" + prefix
    +		}
    +
    +		child.Attr = append(child.Attr, ns)
    +	}
    +
    +	t := value.Type()
    +	for i := 0; i < value.NumField(); i++ {
    +		member := elemOf(value.Field(i))
    +		field := t.Field(i)
    +
    +		if field.PkgPath != "" {
    +			continue // ignore unexported fields
    +		}
    +
    +		mTag := field.Tag
    +		if mTag.Get("location") != "" { // skip non-body members
    +			continue
    +		}
    +
    +		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
    +			token := protocol.GetIdempotencyToken()
    +			member = reflect.ValueOf(token)
    +		}
    +
    +		memberName := mTag.Get("locationName")
    +		if memberName == "" {
    +			memberName = field.Name
    +			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
    +		}
    +		if err := b.buildValue(member, child, mTag); err != nil {
    +			return err
    +		}
    +
    +		fieldAdded = true
    +	}
    +
+	if fieldAdded { // only append this child if we have one or more valid members
    +		current.AddChild(child)
    +	}
    +
    +	return nil
    +}
    +
    +// buildList adds the value's list items to the current XMLNode as children nodes. All
    +// nested values in the list are converted to XMLNodes also.
    +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
    +	if value.IsNil() { // don't build omitted lists
    +		return nil
    +	}
    +
    +	// check for unflattened list member
    +	flattened := tag.Get("flattened") != ""
    +
    +	xname := xml.Name{Local: tag.Get("locationName")}
    +	if flattened {
    +		for i := 0; i < value.Len(); i++ {
    +			child := NewXMLElement(xname)
    +			current.AddChild(child)
    +			if err := b.buildValue(value.Index(i), child, ""); err != nil {
    +				return err
    +			}
    +		}
    +	} else {
    +		list := NewXMLElement(xname)
    +		current.AddChild(list)
    +
    +		for i := 0; i < value.Len(); i++ {
    +			iname := tag.Get("locationNameList")
    +			if iname == "" {
    +				iname = "member"
    +			}
    +
    +			child := NewXMLElement(xml.Name{Local: iname})
    +			list.AddChild(child)
    +			if err := b.buildValue(value.Index(i), child, ""); err != nil {
    +				return err
    +			}
    +		}
    +	}
    +
    +	return nil
    +}
    +
    +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
    +// nested values in the map are converted to XMLNodes also.
    +//
    +// Error will be returned if it is unable to build the map's values into XMLNodes
    +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
    +	if value.IsNil() { // don't build omitted maps
    +		return nil
    +	}
    +
    +	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
    +	current.AddChild(maproot)
    +	current = maproot
    +
    +	kname, vname := "key", "value"
    +	if n := tag.Get("locationNameKey"); n != "" {
    +		kname = n
    +	}
    +	if n := tag.Get("locationNameValue"); n != "" {
    +		vname = n
    +	}
    +
    +	// sorting is not required for compliance, but it makes testing easier
    +	keys := make([]string, value.Len())
    +	for i, k := range value.MapKeys() {
    +		keys[i] = k.String()
    +	}
    +	sort.Strings(keys)
    +
    +	for _, k := range keys {
    +		v := value.MapIndex(reflect.ValueOf(k))
    +
    +		mapcur := current
    +		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
    +			child := NewXMLElement(xml.Name{Local: "entry"})
    +			mapcur.AddChild(child)
    +			mapcur = child
    +		}
    +
    +		kchild := NewXMLElement(xml.Name{Local: kname})
    +		kchild.Text = k
    +		vchild := NewXMLElement(xml.Name{Local: vname})
    +		mapcur.AddChild(kchild)
    +		mapcur.AddChild(vchild)
    +
    +		if err := b.buildValue(v, vchild, ""); err != nil {
    +			return err
    +		}
    +	}
    +
    +	return nil
    +}
    +
+// buildScalar will convert the value into a string and append it as an attribute or child
    +// of the current XMLNode.
    +//
+// The value will be added as an attribute if tag contains an "xmlAttribute" attribute value.
    +//
    +// Error will be returned if the value type is unsupported.
    +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
    +	var str string
    +	switch converted := value.Interface().(type) {
    +	case string:
    +		str = converted
    +	case []byte:
    +		if !value.IsNil() {
    +			str = base64.StdEncoding.EncodeToString(converted)
    +		}
    +	case bool:
    +		str = strconv.FormatBool(converted)
    +	case int64:
    +		str = strconv.FormatInt(converted, 10)
    +	case int:
    +		str = strconv.Itoa(converted)
    +	case float64:
    +		str = strconv.FormatFloat(converted, 'f', -1, 64)
    +	case float32:
    +		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
    +	case time.Time:
    +		const ISO8601UTC = "2006-01-02T15:04:05Z"
    +		str = converted.UTC().Format(ISO8601UTC)
    +	default:
    +		return fmt.Errorf("unsupported value for param %s: %v (%s)",
    +			tag.Get("locationName"), value.Interface(), value.Type().Name())
    +	}
    +
    +	xname := xml.Name{Local: tag.Get("locationName")}
    +	if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
    +		attr := xml.Attr{Name: xname, Value: str}
    +		current.Attr = append(current.Attr, attr)
    +	} else { // regular text node
    +		current.AddChild(&XMLNode{Name: xname, Text: str})
    +	}
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
    new file mode 100644
    index 0000000..49f291a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
    @@ -0,0 +1,260 @@
    +package xmlutil
    +
    +import (
    +	"encoding/base64"
    +	"encoding/xml"
    +	"fmt"
    +	"io"
    +	"reflect"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +// UnmarshalXML deserializes an xml.Decoder into the container v. V
    +// needs to match the shape of the XML expected to be decoded.
    +// If the shape doesn't match unmarshaling will fail.
    +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
    +	n, _ := XMLToStruct(d, nil)
    +	if n.Children != nil {
    +		for _, root := range n.Children {
    +			for _, c := range root {
    +				if wrappedChild, ok := c.Children[wrapper]; ok {
    +					c = wrappedChild[0] // pull out wrapped element
    +				}
    +
    +				err := parse(reflect.ValueOf(v), c, "")
    +				if err != nil {
    +					if err == io.EOF {
    +						return nil
    +					}
    +					return err
    +				}
    +			}
    +		}
    +		return nil
    +	}
    +	return nil
    +}
    +
    +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
    +// will be used to determine the type from r.
    +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
    +	rtype := r.Type()
    +	if rtype.Kind() == reflect.Ptr {
    +		rtype = rtype.Elem() // check kind of actual element type
    +	}
    +
    +	t := tag.Get("type")
    +	if t == "" {
    +		switch rtype.Kind() {
    +		case reflect.Struct:
    +			t = "structure"
    +		case reflect.Slice:
    +			t = "list"
    +		case reflect.Map:
    +			t = "map"
    +		}
    +	}
    +
    +	switch t {
    +	case "structure":
    +		if field, ok := rtype.FieldByName("_"); ok {
    +			tag = field.Tag
    +		}
    +		return parseStruct(r, node, tag)
    +	case "list":
    +		return parseList(r, node, tag)
    +	case "map":
    +		return parseMap(r, node, tag)
    +	default:
    +		return parseScalar(r, node, tag)
    +	}
    +}
    +
    +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
    +// types in the structure will also be deserialized.
    +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
    +	t := r.Type()
    +	if r.Kind() == reflect.Ptr {
    +		if r.IsNil() { // create the structure if it's nil
    +			s := reflect.New(r.Type().Elem())
    +			r.Set(s)
    +			r = s
    +		}
    +
    +		r = r.Elem()
    +		t = t.Elem()
    +	}
    +
    +	// unwrap any payloads
    +	if payload := tag.Get("payload"); payload != "" {
    +		field, _ := t.FieldByName(payload)
    +		return parseStruct(r.FieldByName(payload), node, field.Tag)
    +	}
    +
    +	for i := 0; i < t.NumField(); i++ {
    +		field := t.Field(i)
    +		if c := field.Name[0:1]; strings.ToLower(c) == c {
    +			continue // ignore unexported fields
    +		}
    +
    +		// figure out what this field is called
    +		name := field.Name
    +		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
    +			name = field.Tag.Get("locationNameList")
    +		} else if locName := field.Tag.Get("locationName"); locName != "" {
    +			name = locName
    +		}
    +
    +		// try to find the field by name in elements
    +		elems := node.Children[name]
    +
    +		if elems == nil { // try to find the field in attributes
    +			for _, a := range node.Attr {
    +				if name == a.Name.Local {
    +					// turn this into a text node for de-serializing
    +					elems = []*XMLNode{{Text: a.Value}}
    +				}
    +			}
    +		}
    +
    +		member := r.FieldByName(field.Name)
    +		for _, elem := range elems {
    +			err := parse(member, elem, field.Tag)
    +			if err != nil {
    +				return err
    +			}
    +		}
    +	}
    +	return nil
    +}
    +
    +// parseList deserializes a list of values from an XML node. Each list entry
    +// will also be deserialized.
    +func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
    +	t := r.Type()
    +
    +	if tag.Get("flattened") == "" { // look at all item entries
    +		mname := "member"
    +		if name := tag.Get("locationNameList"); name != "" {
    +			mname = name
    +		}
    +
    +		if Children, ok := node.Children[mname]; ok {
    +			if r.IsNil() {
    +				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
    +			}
    +
    +			for i, c := range Children {
    +				err := parse(r.Index(i), c, "")
    +				if err != nil {
    +					return err
    +				}
    +			}
    +		}
    +	} else { // flattened list means this is a single element
    +		if r.IsNil() {
    +			r.Set(reflect.MakeSlice(t, 0, 0))
    +		}
    +
    +		childR := reflect.Zero(t.Elem())
    +		r.Set(reflect.Append(r, childR))
    +		err := parse(r.Index(r.Len()-1), node, "")
    +		if err != nil {
    +			return err
    +		}
    +	}
    +
    +	return nil
    +}
    +
    +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
    +// will also be deserialized as map entries.
    +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
    +	if r.IsNil() {
    +		r.Set(reflect.MakeMap(r.Type()))
    +	}
    +
    +	if tag.Get("flattened") == "" { // look at all child entries
    +		for _, entry := range node.Children["entry"] {
    +			parseMapEntry(r, entry, tag)
    +		}
    +	} else { // this element is itself an entry
    +		parseMapEntry(r, node, tag)
    +	}
    +
    +	return nil
    +}
    +
    +// parseMapEntry deserializes a map entry from a XML node.
    +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
    +	kname, vname := "key", "value"
    +	if n := tag.Get("locationNameKey"); n != "" {
    +		kname = n
    +	}
    +	if n := tag.Get("locationNameValue"); n != "" {
    +		vname = n
    +	}
    +
    +	keys, ok := node.Children[kname]
    +	values := node.Children[vname]
    +	if ok {
    +		for i, key := range keys {
    +			keyR := reflect.ValueOf(key.Text)
    +			value := values[i]
    +			valueR := reflect.New(r.Type().Elem()).Elem()
    +
    +			parse(valueR, value, "")
    +			r.SetMapIndex(keyR, valueR)
    +		}
    +	}
    +	return nil
    +}
    +
    +// parseScalar deserializes an XMLNode value into a concrete type based on the
    +// interface type of r.
    +//
    +// Error is returned if the deserialization fails due to invalid type conversion,
    +// or unsupported interface type.
    +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
    +	switch r.Interface().(type) {
    +	case *string:
    +		r.Set(reflect.ValueOf(&node.Text))
    +		return nil
    +	case []byte:
    +		b, err := base64.StdEncoding.DecodeString(node.Text)
    +		if err != nil {
    +			return err
    +		}
    +		r.Set(reflect.ValueOf(b))
    +	case *bool:
    +		v, err := strconv.ParseBool(node.Text)
    +		if err != nil {
    +			return err
    +		}
    +		r.Set(reflect.ValueOf(&v))
    +	case *int64:
    +		v, err := strconv.ParseInt(node.Text, 10, 64)
    +		if err != nil {
    +			return err
    +		}
    +		r.Set(reflect.ValueOf(&v))
    +	case *float64:
    +		v, err := strconv.ParseFloat(node.Text, 64)
    +		if err != nil {
    +			return err
    +		}
    +		r.Set(reflect.ValueOf(&v))
    +	case *time.Time:
    +		const ISO8601UTC = "2006-01-02T15:04:05Z"
    +		t, err := time.Parse(ISO8601UTC, node.Text)
    +		if err != nil {
    +			return err
    +		}
    +		r.Set(reflect.ValueOf(&t))
    +	default:
    +		return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
    +	}
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
    new file mode 100644
    index 0000000..72c198a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
    @@ -0,0 +1,105 @@
    +package xmlutil
    +
    +import (
    +	"encoding/xml"
    +	"io"
    +	"sort"
    +)
    +
    +// An XMLNode contains the values to be encoded or decoded.
    +type XMLNode struct {
    +	Name     xml.Name              `json:",omitempty"`
    +	Children map[string][]*XMLNode `json:",omitempty"`
    +	Text     string                `json:",omitempty"`
    +	Attr     []xml.Attr            `json:",omitempty"`
    +}
    +
    +// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
    +func NewXMLElement(name xml.Name) *XMLNode {
    +	return &XMLNode{
    +		Name:     name,
    +		Children: map[string][]*XMLNode{},
    +		Attr:     []xml.Attr{},
    +	}
    +}
    +
    +// AddChild adds child to the XMLNode.
    +func (n *XMLNode) AddChild(child *XMLNode) {
    +	if _, ok := n.Children[child.Name.Local]; !ok {
    +		n.Children[child.Name.Local] = []*XMLNode{}
    +	}
    +	n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
    +}
    +
    +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values.
    +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
    +	out := &XMLNode{}
    +	for {
    +		tok, err := d.Token()
    +		if tok == nil || err == io.EOF {
    +			break
    +		}
    +		if err != nil {
    +			return out, err
    +		}
    +
    +		switch typed := tok.(type) {
    +		case xml.CharData:
    +			out.Text = string(typed.Copy())
    +		case xml.StartElement:
    +			el := typed.Copy()
    +			out.Attr = el.Attr
    +			if out.Children == nil {
    +				out.Children = map[string][]*XMLNode{}
    +			}
    +
    +			name := typed.Name.Local
    +			slice := out.Children[name]
    +			if slice == nil {
    +				slice = []*XMLNode{}
    +			}
    +			node, e := XMLToStruct(d, &el)
    +			if e != nil {
    +				return out, e
    +			}
    +			node.Name = typed.Name
    +			slice = append(slice, node)
    +			out.Children[name] = slice
    +		case xml.EndElement:
    +			if s != nil && s.Name.Local == typed.Name.Local { // matching end token
    +				return out, nil
    +			}
    +		}
    +	}
    +	return out, nil
    +}
    +
    +// StructToXML writes an XMLNode to a xml.Encoder as tokens.
    +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
    +	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
    +
    +	if node.Text != "" {
    +		e.EncodeToken(xml.CharData([]byte(node.Text)))
    +	} else if sorted {
    +		sortedNames := []string{}
    +		for k := range node.Children {
    +			sortedNames = append(sortedNames, k)
    +		}
    +		sort.Strings(sortedNames)
    +
    +		for _, k := range sortedNames {
    +			for _, v := range node.Children[k] {
    +				StructToXML(e, v, sorted)
    +			}
    +		}
    +	} else {
    +		for _, c := range node.Children {
    +			for _, v := range c {
    +				StructToXML(e, v, sorted)
    +			}
    +		}
    +	}
    +
    +	e.EncodeToken(xml.EndElement{Name: node.Name})
    +	return e.Flush()
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
    new file mode 100644
    index 0000000..b51e944
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
    @@ -0,0 +1,134 @@
    +package waiter
    +
    +import (
    +	"fmt"
    +	"reflect"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awserr"
    +	"github.com/aws/aws-sdk-go/aws/awsutil"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +// A Config provides a collection of configuration values to set up a generated
    +// waiter code with.
    +type Config struct {
    +	Name        string
    +	Delay       int
    +	MaxAttempts int
    +	Operation   string
    +	Acceptors   []WaitAcceptor
    +}
    +
    +// A WaitAcceptor provides the information needed to wait for an API operation
    +// to complete.
    +type WaitAcceptor struct {
    +	Expected interface{}
    +	Matcher  string
    +	State    string
    +	Argument string
    +}
    +
    +// A Waiter provides waiting for an operation to complete.
    +type Waiter struct {
    +	Config
    +	Client interface{}
    +	Input  interface{}
    +}
    +
    +// Wait waits for an operation to complete, expire max attempts, or fail. Error
    +// is returned if the operation fails.
    +func (w *Waiter) Wait() error {
    +	client := reflect.ValueOf(w.Client)
    +	in := reflect.ValueOf(w.Input)
    +	method := client.MethodByName(w.Config.Operation + "Request")
    +
    +	for i := 0; i < w.MaxAttempts; i++ {
    +		res := method.Call([]reflect.Value{in})
    +		req := res[0].Interface().(*request.Request)
    +		req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter"))
    +
    +		err := req.Send()
    +		for _, a := range w.Acceptors {
    +			result := false
    +			var vals []interface{}
    +			switch a.Matcher {
    +			case "pathAll", "path":
    +				// Require all matches to be equal for result to match
    +				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
    +				if len(vals) == 0 {
    +					break
    +				}
    +				result = true
    +				for _, val := range vals {
    +					if !awsutil.DeepEqual(val, a.Expected) {
    +						result = false
    +						break
    +					}
    +				}
    +			case "pathAny":
    +				// Only a single match needs to equal for the result to match
    +				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
    +				for _, val := range vals {
    +					if awsutil.DeepEqual(val, a.Expected) {
    +						result = true
    +						break
    +					}
    +				}
    +			case "status":
    +				s := a.Expected.(int)
    +				result = s == req.HTTPResponse.StatusCode
    +			case "error":
    +				if aerr, ok := err.(awserr.Error); ok {
    +					result = aerr.Code() == a.Expected.(string)
    +				}
    +			case "pathList":
    +				// ignored matcher
    +			default:
    +				logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s",
    +					w.Config.Operation, a.Matcher)
    +			}
    +
    +			if !result {
    +				// If there was no matching result found there is nothing more to do
    +				// for this response, retry the request.
    +				continue
    +			}
    +
    +			switch a.State {
    +			case "success":
    +				// waiter completed
    +				return nil
    +			case "failure":
    +				// Waiter failure state triggered
    +				return awserr.New("ResourceNotReady",
    +					fmt.Sprintf("failed waiting for successful resource state"), err)
    +			case "retry":
    +				// clear the error and retry the operation
    +				err = nil
    +			default:
    +				logf(client, "WARNING: Waiter for %s encountered unexpected state: %s",
    +					w.Config.Operation, a.State)
    +			}
    +		}
    +		if err != nil {
    +			return err
    +		}
    +
    +		time.Sleep(time.Second * time.Duration(w.Delay))
    +	}
    +
    +	return awserr.New("ResourceNotReady",
    +		fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil)
    +}
    +
    +func logf(client reflect.Value, msg string, args ...interface{}) {
    +	cfgVal := client.FieldByName("Config")
    +	if !cfgVal.IsValid() {
    +		return
    +	}
    +	if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil {
    +		cfg.Logger.Log(fmt.Sprintf(msg, args...))
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
    new file mode 100644
    index 0000000..ac3653d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
    @@ -0,0 +1,49964 @@
    +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
    +
    +// Package ec2 provides a client for Amazon Elastic Compute Cloud.
    +package ec2
    +
    +import (
    +	"fmt"
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws/awsutil"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/protocol"
    +	"github.com/aws/aws-sdk-go/private/protocol/ec2query"
    +)
    +
    +const opAcceptReservedInstancesExchangeQuote = "AcceptReservedInstancesExchangeQuote"
    +
    +// AcceptReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the
    +// client's request for the AcceptReservedInstancesExchangeQuote operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AcceptReservedInstancesExchangeQuote for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AcceptReservedInstancesExchangeQuote method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AcceptReservedInstancesExchangeQuoteRequest method.
    +//    req, resp := client.AcceptReservedInstancesExchangeQuoteRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AcceptReservedInstancesExchangeQuoteRequest(input *AcceptReservedInstancesExchangeQuoteInput) (req *request.Request, output *AcceptReservedInstancesExchangeQuoteOutput) {
    +	op := &request.Operation{
    +		Name:       opAcceptReservedInstancesExchangeQuote,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AcceptReservedInstancesExchangeQuoteInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AcceptReservedInstancesExchangeQuoteOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AcceptReservedInstancesExchangeQuote API operation for Amazon Elastic Compute Cloud.
    +//
    +// Purchases Convertible Reserved Instance offerings described in the GetReservedInstancesExchangeQuote
    +// call.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AcceptReservedInstancesExchangeQuote for usage and error information.
    +func (c *EC2) AcceptReservedInstancesExchangeQuote(input *AcceptReservedInstancesExchangeQuoteInput) (*AcceptReservedInstancesExchangeQuoteOutput, error) {
    +	req, out := c.AcceptReservedInstancesExchangeQuoteRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection"
    +
    +// AcceptVpcPeeringConnectionRequest generates a "aws/request.Request" representing the
    +// client's request for the AcceptVpcPeeringConnection operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AcceptVpcPeeringConnection for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AcceptVpcPeeringConnection method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AcceptVpcPeeringConnectionRequest method.
    +//    req, resp := client.AcceptVpcPeeringConnectionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectionInput) (req *request.Request, output *AcceptVpcPeeringConnectionOutput) {
    +	op := &request.Operation{
    +		Name:       opAcceptVpcPeeringConnection,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AcceptVpcPeeringConnectionInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AcceptVpcPeeringConnectionOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AcceptVpcPeeringConnection API operation for Amazon Elastic Compute Cloud.
    +//
    +// Accept a VPC peering connection request. To accept a request, the VPC peering
    +// connection must be in the pending-acceptance state, and you must be the owner
    +// of the peer VPC. Use the DescribeVpcPeeringConnections request to view your
    +// outstanding VPC peering connection requests.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AcceptVpcPeeringConnection for usage and error information.
    +func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) (*AcceptVpcPeeringConnectionOutput, error) {
    +	req, out := c.AcceptVpcPeeringConnectionRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAllocateAddress = "AllocateAddress"
    +
    +// AllocateAddressRequest generates a "aws/request.Request" representing the
    +// client's request for the AllocateAddress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AllocateAddress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AllocateAddress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AllocateAddressRequest method.
    +//    req, resp := client.AllocateAddressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.Request, output *AllocateAddressOutput) {
    +	op := &request.Operation{
    +		Name:       opAllocateAddress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AllocateAddressInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AllocateAddressOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AllocateAddress API operation for Amazon Elastic Compute Cloud.
    +//
    +// Acquires an Elastic IP address.
    +//
    +// An Elastic IP address is for use either in the EC2-Classic platform or in
    +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AllocateAddress for usage and error information.
    +func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutput, error) {
    +	req, out := c.AllocateAddressRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAllocateHosts = "AllocateHosts"
    +
    +// AllocateHostsRequest generates a "aws/request.Request" representing the
    +// client's request for the AllocateHosts operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AllocateHosts for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AllocateHosts method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AllocateHostsRequest method.
    +//    req, resp := client.AllocateHostsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Request, output *AllocateHostsOutput) {
    +	op := &request.Operation{
    +		Name:       opAllocateHosts,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AllocateHostsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AllocateHostsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AllocateHosts API operation for Amazon Elastic Compute Cloud.
    +//
    +// Allocates a Dedicated Host to your account. At minimum you need to specify
    +// the instance size type, Availability Zone, and quantity of hosts you want
    +// to allocate.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AllocateHosts for usage and error information.
    +func (c *EC2) AllocateHosts(input *AllocateHostsInput) (*AllocateHostsOutput, error) {
    +	req, out := c.AllocateHostsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses"
    +
    +// AssignPrivateIpAddressesRequest generates a "aws/request.Request" representing the
    +// client's request for the AssignPrivateIpAddresses operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AssignPrivateIpAddresses for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AssignPrivateIpAddresses method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AssignPrivateIpAddressesRequest method.
    +//    req, resp := client.AssignPrivateIpAddressesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInput) (req *request.Request, output *AssignPrivateIpAddressesOutput) {
    +	op := &request.Operation{
    +		Name:       opAssignPrivateIpAddresses,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AssignPrivateIpAddressesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &AssignPrivateIpAddressesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AssignPrivateIpAddresses API operation for Amazon Elastic Compute Cloud.
    +//
    +// Assigns one or more secondary private IP addresses to the specified network
    +// interface. You can specify one or more specific secondary IP addresses, or
    +// you can specify the number of secondary IP addresses to be automatically
    +// assigned within the subnet's CIDR block range. The number of secondary IP
    +// addresses that you can assign to an instance varies by instance type. For
    +// information about instance types, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
    +// in the Amazon Elastic Compute Cloud User Guide. For more information about
    +// Elastic IP addresses, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// AssignPrivateIpAddresses is available only in EC2-VPC.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AssignPrivateIpAddresses for usage and error information.
    +func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*AssignPrivateIpAddressesOutput, error) {
    +	req, out := c.AssignPrivateIpAddressesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAssociateAddress = "AssociateAddress"
    +
    +// AssociateAddressRequest generates a "aws/request.Request" representing the
    +// client's request for the AssociateAddress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AssociateAddress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AssociateAddress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AssociateAddressRequest method.
    +//    req, resp := client.AssociateAddressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *request.Request, output *AssociateAddressOutput) {
    +	op := &request.Operation{
    +		Name:       opAssociateAddress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AssociateAddressInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AssociateAddressOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AssociateAddress API operation for Amazon Elastic Compute Cloud.
    +//
    +// Associates an Elastic IP address with an instance or a network interface.
    +//
    +// An Elastic IP address is for use in either the EC2-Classic platform or in
    +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is
    +// already associated with a different instance, it is disassociated from that
    +// instance and associated with the specified instance.
    +//
    +// [VPC in an EC2-Classic account] If you don't specify a private IP address,
    +// the Elastic IP address is associated with the primary IP address. If the
    +// Elastic IP address is already associated with a different instance or a network
    +// interface, you get an error unless you allow reassociation.
    +//
    +// This is an idempotent operation. If you perform the operation more than once,
    +// Amazon EC2 doesn't return an error, and you may be charged for each time
    +// the Elastic IP address is remapped to the same instance. For more information,
    +// see the Elastic IP Addresses section of Amazon EC2 Pricing (http://aws.amazon.com/ec2/pricing/).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AssociateAddress for usage and error information.
    +func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressOutput, error) {
    +	req, out := c.AssociateAddressRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAssociateDhcpOptions = "AssociateDhcpOptions"
    +
    +// AssociateDhcpOptionsRequest generates a "aws/request.Request" representing the
    +// client's request for the AssociateDhcpOptions operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AssociateDhcpOptions for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AssociateDhcpOptions method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AssociateDhcpOptionsRequest method.
    +//    req, resp := client.AssociateDhcpOptionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req *request.Request, output *AssociateDhcpOptionsOutput) {
    +	op := &request.Operation{
    +		Name:       opAssociateDhcpOptions,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AssociateDhcpOptionsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &AssociateDhcpOptionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AssociateDhcpOptions API operation for Amazon Elastic Compute Cloud.
    +//
    +// Associates a set of DHCP options (that you've previously created) with the
    +// specified VPC, or associates no DHCP options with the VPC.
    +//
    +// After you associate the options with the VPC, any existing instances and
    +// all new instances that you launch in that VPC use the options. You don't
    +// need to restart or relaunch the instances. They automatically pick up the
    +// changes within a few hours, depending on how frequently the instance renews
    +// its DHCP lease. You can explicitly renew the lease using the operating system
    +// on the instance.
    +//
    +// For more information, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AssociateDhcpOptions for usage and error information.
    +func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*AssociateDhcpOptionsOutput, error) {
    +	req, out := c.AssociateDhcpOptionsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAssociateRouteTable = "AssociateRouteTable"
    +
    +// AssociateRouteTableRequest generates a "aws/request.Request" representing the
    +// client's request for the AssociateRouteTable operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AssociateRouteTable for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AssociateRouteTable method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AssociateRouteTableRequest method.
    +//    req, resp := client.AssociateRouteTableRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *request.Request, output *AssociateRouteTableOutput) {
    +	op := &request.Operation{
    +		Name:       opAssociateRouteTable,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AssociateRouteTableInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AssociateRouteTableOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AssociateRouteTable API operation for Amazon Elastic Compute Cloud.
    +//
    +// Associates a subnet with a route table. The subnet and route table must be
    +// in the same VPC. This association causes traffic originating from the subnet
    +// to be routed according to the routes in the route table. The action returns
    +// an association ID, which you need in order to disassociate the route table
    +// from the subnet later. A route table can be associated with multiple subnets.
    +//
    +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AssociateRouteTable for usage and error information.
    +func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRouteTableOutput, error) {
    +	req, out := c.AssociateRouteTableRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAttachClassicLinkVpc = "AttachClassicLinkVpc"
    +
    +// AttachClassicLinkVpcRequest generates a "aws/request.Request" representing the
    +// client's request for the AttachClassicLinkVpc operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AttachClassicLinkVpc for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AttachClassicLinkVpc method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AttachClassicLinkVpcRequest method.
    +//    req, resp := client.AttachClassicLinkVpcRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req *request.Request, output *AttachClassicLinkVpcOutput) {
    +	op := &request.Operation{
    +		Name:       opAttachClassicLinkVpc,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AttachClassicLinkVpcInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AttachClassicLinkVpcOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AttachClassicLinkVpc API operation for Amazon Elastic Compute Cloud.
    +//
    +// Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or
    +// more of the VPC's security groups. You cannot link an EC2-Classic instance
    +// to more than one VPC at a time. You can only link an instance that's in the
    +// running state. An instance is automatically unlinked from a VPC when it's
    +// stopped - you can link it to the VPC again when you restart it.
    +//
    +// After you've linked an instance, you cannot change the VPC security groups
    +// that are associated with it. To change the security groups, you must first
    +// unlink the instance, and then link it again.
    +//
    +// Linking your instance to a VPC is sometimes referred to as attaching your
    +// instance.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AttachClassicLinkVpc for usage and error information.
    +func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachClassicLinkVpcOutput, error) {
    +	req, out := c.AttachClassicLinkVpcRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAttachInternetGateway = "AttachInternetGateway"
    +
    +// AttachInternetGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the AttachInternetGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AttachInternetGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AttachInternetGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AttachInternetGatewayRequest method.
    +//    req, resp := client.AttachInternetGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (req *request.Request, output *AttachInternetGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opAttachInternetGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AttachInternetGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &AttachInternetGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AttachInternetGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Attaches an Internet gateway to a VPC, enabling connectivity between the
    +// Internet and the VPC. For more information about your VPC and Internet gateway,
    +// see the Amazon Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AttachInternetGateway for usage and error information.
    +func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachInternetGatewayOutput, error) {
    +	req, out := c.AttachInternetGatewayRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAttachNetworkInterface = "AttachNetworkInterface"
    +
    +// AttachNetworkInterfaceRequest generates a "aws/request.Request" representing the
    +// client's request for the AttachNetworkInterface operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AttachNetworkInterface for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AttachNetworkInterface method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AttachNetworkInterfaceRequest method.
    +//    req, resp := client.AttachNetworkInterfaceRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) (req *request.Request, output *AttachNetworkInterfaceOutput) {
    +	op := &request.Operation{
    +		Name:       opAttachNetworkInterface,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AttachNetworkInterfaceInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AttachNetworkInterfaceOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AttachNetworkInterface API operation for Amazon Elastic Compute Cloud.
    +//
    +// Attaches a network interface to an instance.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AttachNetworkInterface for usage and error information.
    +func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*AttachNetworkInterfaceOutput, error) {
    +	req, out := c.AttachNetworkInterfaceRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAttachVolume = "AttachVolume"
    +
    +// AttachVolumeRequest generates a "aws/request.Request" representing the
    +// client's request for the AttachVolume operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AttachVolume for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AttachVolume method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AttachVolumeRequest method.
    +//    req, resp := client.AttachVolumeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *VolumeAttachment) {
    +	op := &request.Operation{
    +		Name:       opAttachVolume,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AttachVolumeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &VolumeAttachment{}
    +	req.Data = output
    +	return
    +}
    +
    +// AttachVolume API operation for Amazon Elastic Compute Cloud.
    +//
    +// Attaches an EBS volume to a running or stopped instance and exposes it to
    +// the instance with the specified device name.
    +//
    +// Encrypted EBS volumes may only be attached to instances that support Amazon
    +// EBS encryption. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// For a list of supported device names, see Attaching an EBS Volume to an Instance
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html).
    +// Any device names that aren't reserved for instance store volumes can be used
    +// for EBS volumes. For more information, see Amazon EC2 Instance Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// If a volume has an AWS Marketplace product code:
    +//
    +//    * The volume can be attached only to a stopped instance.
    +//
    +//    * AWS Marketplace product codes are copied from the volume to the instance.
    +//
    +//    * You must be subscribed to the product.
    +//
    +//    * The instance type and operating system of the instance must support
    +//    the product. For example, you can't detach a volume from a Windows instance
    +//    and attach it to a Linux instance.
    +//
    +// For an overview of the AWS Marketplace, see Introducing AWS Marketplace (https://aws.amazon.com/marketplace/help/200900000).
    +//
    +// For more information about EBS volumes, see Attaching Amazon EBS Volumes
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AttachVolume for usage and error information.
    +func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) {
    +	req, out := c.AttachVolumeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAttachVpnGateway = "AttachVpnGateway"
    +
    +// AttachVpnGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the AttachVpnGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AttachVpnGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AttachVpnGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AttachVpnGatewayRequest method.
    +//    req, resp := client.AttachVpnGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *request.Request, output *AttachVpnGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opAttachVpnGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AttachVpnGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AttachVpnGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AttachVpnGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Attaches a virtual private gateway to a VPC. For more information, see Adding
    +// a Hardware Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AttachVpnGateway for usage and error information.
    +func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayOutput, error) {
    +	req, out := c.AttachVpnGatewayRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress"
    +
    +// AuthorizeSecurityGroupEgressRequest generates a "aws/request.Request" representing the
    +// client's request for the AuthorizeSecurityGroupEgress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AuthorizeSecurityGroupEgress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AuthorizeSecurityGroupEgress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AuthorizeSecurityGroupEgressRequest method.
    +//    req, resp := client.AuthorizeSecurityGroupEgressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupEgressInput) (req *request.Request, output *AuthorizeSecurityGroupEgressOutput) {
    +	op := &request.Operation{
    +		Name:       opAuthorizeSecurityGroupEgress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AuthorizeSecurityGroupEgressInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &AuthorizeSecurityGroupEgressOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AuthorizeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud.
    +//
    +// [EC2-VPC only] Adds one or more egress rules to a security group for use
    +// with a VPC. Specifically, this action permits instances to send traffic to
    +// one or more destination CIDR IP address ranges, or to one or more destination
    +// security groups for the same VPC. This action doesn't apply to security groups
    +// for use in EC2-Classic. For more information, see Security Groups for Your
    +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// You can have up to 50 rules per security group (covering both ingress and
    +// egress rules).
    +//
    +// Each rule consists of the protocol (for example, TCP), plus either a CIDR
    +// range or a source group. For the TCP and UDP protocols, you must also specify
    +// the destination port or port range. For the ICMP protocol, you must also
    +// specify the ICMP type and code. You can use -1 for the type or code to mean
    +// all types or all codes.
    +//
    +// Rule changes are propagated to affected instances as quickly as possible.
    +// However, a small delay might occur.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AuthorizeSecurityGroupEgress for usage and error information.
    +func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressInput) (*AuthorizeSecurityGroupEgressOutput, error) {
    +	req, out := c.AuthorizeSecurityGroupEgressRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress"
    +
    +// AuthorizeSecurityGroupIngressRequest generates a "aws/request.Request" representing the
    +// client's request for the AuthorizeSecurityGroupIngress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AuthorizeSecurityGroupIngress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AuthorizeSecurityGroupIngress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AuthorizeSecurityGroupIngressRequest method.
    +//    req, resp := client.AuthorizeSecurityGroupIngressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroupIngressInput) (req *request.Request, output *AuthorizeSecurityGroupIngressOutput) {
    +	op := &request.Operation{
    +		Name:       opAuthorizeSecurityGroupIngress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AuthorizeSecurityGroupIngressInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &AuthorizeSecurityGroupIngressOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AuthorizeSecurityGroupIngress API operation for Amazon Elastic Compute Cloud.
    +//
    +// Adds one or more ingress rules to a security group.
    +//
    +// EC2-Classic: You can have up to 100 rules per group.
    +//
    +// EC2-VPC: You can have up to 50 rules per group (covering both ingress and
    +// egress rules).
    +//
    +// Rule changes are propagated to instances within the security group as quickly
    +// as possible. However, a small delay might occur.
    +//
    +// [EC2-Classic] This action gives one or more CIDR IP address ranges permission
    +// to access a security group in your account, or gives one or more security
    +// groups (called the source groups) permission to access a security group for
    +// your account. A source group can be for your own AWS account, or another.
    +//
    +// [EC2-VPC] This action gives one or more CIDR IP address ranges permission
    +// to access a security group in your VPC, or gives one or more other security
    +// groups (called the source groups) permission to access a security group for
    +// your VPC. The security groups must all be for the same VPC.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation AuthorizeSecurityGroupIngress for usage and error information.
    +func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngressInput) (*AuthorizeSecurityGroupIngressOutput, error) {
    +	req, out := c.AuthorizeSecurityGroupIngressRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opBundleInstance = "BundleInstance"
    +
    +// BundleInstanceRequest generates a "aws/request.Request" representing the
    +// client's request for the BundleInstance operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See BundleInstance for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the BundleInstance method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the BundleInstanceRequest method.
    +//    req, resp := client.BundleInstanceRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Request, output *BundleInstanceOutput) {
    +	op := &request.Operation{
    +		Name:       opBundleInstance,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &BundleInstanceInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &BundleInstanceOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// BundleInstance API operation for Amazon Elastic Compute Cloud.
    +//
    +// Bundles an Amazon instance store-backed Windows instance.
    +//
    +// During bundling, only the root device volume (C:\) is bundled. Data on other
    +// instance store volumes is not preserved.
    +//
    +// This action is not applicable for Linux/Unix instances or Windows instances
    +// that are backed by Amazon EBS.
    +//
    +// For more information, see Creating an Instance Store-Backed Windows AMI (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_InstanceStoreBacked_WinAMI.html).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation BundleInstance for usage and error information.
    +func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, error) {
    +	req, out := c.BundleInstanceRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCancelBundleTask = "CancelBundleTask"
    +
    +// CancelBundleTaskRequest generates a "aws/request.Request" representing the
    +// client's request for the CancelBundleTask operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CancelBundleTask for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CancelBundleTask method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CancelBundleTaskRequest method.
    +//    req, resp := client.CancelBundleTaskRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *request.Request, output *CancelBundleTaskOutput) {
    +	op := &request.Operation{
    +		Name:       opCancelBundleTask,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CancelBundleTaskInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CancelBundleTaskOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CancelBundleTask API operation for Amazon Elastic Compute Cloud.
    +//
    +// Cancels a bundling operation for an instance store-backed Windows instance.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CancelBundleTask for usage and error information.
    +func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskOutput, error) {
    +	req, out := c.CancelBundleTaskRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCancelConversionTask = "CancelConversionTask"
    +
    +// CancelConversionTaskRequest generates a "aws/request.Request" representing the
    +// client's request for the CancelConversionTask operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CancelConversionTask for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CancelConversionTask method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CancelConversionTaskRequest method.
    +//    req, resp := client.CancelConversionTaskRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req *request.Request, output *CancelConversionTaskOutput) {
    +	op := &request.Operation{
    +		Name:       opCancelConversionTask,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CancelConversionTaskInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &CancelConversionTaskOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CancelConversionTask API operation for Amazon Elastic Compute Cloud.
    +//
    +// Cancels an active conversion task. The task can be the import of an instance
    +// or volume. The action removes all artifacts of the conversion, including
    +// a partially uploaded volume or instance. If the conversion is complete or
    +// is in the process of transferring the final disk image, the command fails
    +// and returns an exception.
    +//
    +// For more information, see Importing a Virtual Machine Using the Amazon EC2
    +// CLI (http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ec2-cli-vmimport-export.html).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CancelConversionTask for usage and error information.
    +func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelConversionTaskOutput, error) {
    +	req, out := c.CancelConversionTaskRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCancelExportTask = "CancelExportTask"
    +
    +// CancelExportTaskRequest generates a "aws/request.Request" representing the
    +// client's request for the CancelExportTask operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CancelExportTask for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CancelExportTask method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CancelExportTaskRequest method.
    +//    req, resp := client.CancelExportTaskRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) {
    +	op := &request.Operation{
    +		Name:       opCancelExportTask,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CancelExportTaskInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &CancelExportTaskOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CancelExportTask API operation for Amazon Elastic Compute Cloud.
    +//
    +// Cancels an active export task. The request removes all artifacts of the export,
    +// including any partially-created Amazon S3 objects. If the export task is
    +// complete or is in the process of transferring the final disk image, the command
    +// fails and returns an error.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CancelExportTask for usage and error information.
    +func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) {
    +	req, out := c.CancelExportTaskRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCancelImportTask = "CancelImportTask"
    +
    +// CancelImportTaskRequest generates a "aws/request.Request" representing the
    +// client's request for the CancelImportTask operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CancelImportTask for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CancelImportTask method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CancelImportTaskRequest method.
    +//    req, resp := client.CancelImportTaskRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *request.Request, output *CancelImportTaskOutput) {
    +	op := &request.Operation{
    +		Name:       opCancelImportTask,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CancelImportTaskInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CancelImportTaskOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CancelImportTask API operation for Amazon Elastic Compute Cloud.
    +//
    +// Cancels an in-process import virtual machine or import snapshot task.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CancelImportTask for usage and error information.
    +func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskOutput, error) {
    +	req, out := c.CancelImportTaskRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCancelReservedInstancesListing = "CancelReservedInstancesListing"
    +
    +// CancelReservedInstancesListingRequest generates a "aws/request.Request" representing the
    +// client's request for the CancelReservedInstancesListing operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CancelReservedInstancesListing for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CancelReservedInstancesListing method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CancelReservedInstancesListingRequest method.
    +//    req, resp := client.CancelReservedInstancesListingRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstancesListingInput) (req *request.Request, output *CancelReservedInstancesListingOutput) {
    +	op := &request.Operation{
    +		Name:       opCancelReservedInstancesListing,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CancelReservedInstancesListingInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CancelReservedInstancesListingOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CancelReservedInstancesListing API operation for Amazon Elastic Compute Cloud.
    +//
    +// Cancels the specified Reserved Instance listing in the Reserved Instance
    +// Marketplace.
    +//
    +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CancelReservedInstancesListing for usage and error information.
    +func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListingInput) (*CancelReservedInstancesListingOutput, error) {
    +	req, out := c.CancelReservedInstancesListingRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCancelSpotFleetRequests = "CancelSpotFleetRequests"
    +
    +// CancelSpotFleetRequestsRequest generates a "aws/request.Request" representing the
    +// client's request for the CancelSpotFleetRequests operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CancelSpotFleetRequests for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CancelSpotFleetRequests method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CancelSpotFleetRequestsRequest method.
    +//    req, resp := client.CancelSpotFleetRequestsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput) (req *request.Request, output *CancelSpotFleetRequestsOutput) {
    +	op := &request.Operation{
    +		Name:       opCancelSpotFleetRequests,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CancelSpotFleetRequestsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CancelSpotFleetRequestsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CancelSpotFleetRequests API operation for Amazon Elastic Compute Cloud.
    +//
    +// Cancels the specified Spot fleet requests.
    +//
    +// After you cancel a Spot fleet request, the Spot fleet launches no new Spot
    +// instances. You must specify whether the Spot fleet should also terminate
    +// its Spot instances. If you terminate the instances, the Spot fleet request
    +// enters the cancelled_terminating state. Otherwise, the Spot fleet request
    +// enters the cancelled_running state and the instances continue to run until
    +// they are interrupted or you terminate them manually.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CancelSpotFleetRequests for usage and error information.
    +func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) {
    +	req, out := c.CancelSpotFleetRequestsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests"
    +
    +// CancelSpotInstanceRequestsRequest generates a "aws/request.Request" representing the
    +// client's request for the CancelSpotInstanceRequests operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CancelSpotInstanceRequests for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CancelSpotInstanceRequests method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CancelSpotInstanceRequestsRequest method.
    +//    req, resp := client.CancelSpotInstanceRequestsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequestsInput) (req *request.Request, output *CancelSpotInstanceRequestsOutput) {
    +	op := &request.Operation{
    +		Name:       opCancelSpotInstanceRequests,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CancelSpotInstanceRequestsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CancelSpotInstanceRequestsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CancelSpotInstanceRequests API operation for Amazon Elastic Compute Cloud.
    +//
    +// Cancels one or more Spot instance requests. Spot instances are instances
    +// that Amazon EC2 starts on your behalf when the bid price that you specify
    +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price
    +// based on available Spot instance capacity and current Spot instance requests.
    +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Canceling a Spot instance request does not terminate running Spot instances
    +// associated with the request.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CancelSpotInstanceRequests for usage and error information.
    +func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) (*CancelSpotInstanceRequestsOutput, error) {
    +	req, out := c.CancelSpotInstanceRequestsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opConfirmProductInstance = "ConfirmProductInstance"
    +
    +// ConfirmProductInstanceRequest generates a "aws/request.Request" representing the
    +// client's request for the ConfirmProductInstance operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ConfirmProductInstance for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ConfirmProductInstance method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ConfirmProductInstanceRequest method.
    +//    req, resp := client.ConfirmProductInstanceRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) (req *request.Request, output *ConfirmProductInstanceOutput) {
    +	op := &request.Operation{
    +		Name:       opConfirmProductInstance,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ConfirmProductInstanceInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &ConfirmProductInstanceOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ConfirmProductInstance API operation for Amazon Elastic Compute Cloud.
    +//
    +// Determines whether a product code is associated with an instance. This action
    +// can only be used by the owner of the product code. It is useful when a product
    +// code owner needs to verify whether another user's instance is eligible for
    +// support.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ConfirmProductInstance for usage and error information.
    +func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*ConfirmProductInstanceOutput, error) {
    +	req, out := c.ConfirmProductInstanceRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCopyImage = "CopyImage"
    +
    +// CopyImageRequest generates a "aws/request.Request" representing the
    +// client's request for the CopyImage operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CopyImage for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CopyImage method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CopyImageRequest method.
    +//    req, resp := client.CopyImageRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) {
    +	op := &request.Operation{
    +		Name:       opCopyImage,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CopyImageInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CopyImageOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CopyImage API operation for Amazon Elastic Compute Cloud.
    +//
    +// Initiates the copy of an AMI from the specified source region to the current
    +// region. You specify the destination region by using its endpoint when making
    +// the request.
    +//
    +// For more information, see Copying AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CopyImage for usage and error information.
    +func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) {
    +	req, out := c.CopyImageRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCopySnapshot = "CopySnapshot"
    +
    +// CopySnapshotRequest generates a "aws/request.Request" representing the
    +// client's request for the CopySnapshot operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CopySnapshot for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CopySnapshot method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CopySnapshotRequest method.
    +//    req, resp := client.CopySnapshotRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) {
    +	op := &request.Operation{
    +		Name:       opCopySnapshot,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CopySnapshotInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CopySnapshotOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CopySnapshot API operation for Amazon Elastic Compute Cloud.
    +//
    +// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon
    +// S3. You can copy the snapshot within the same region or from one region to
    +// another. You can use the snapshot to create EBS volumes or Amazon Machine
    +// Images (AMIs). The snapshot is copied to the regional endpoint that you send
    +// the HTTP request to.
    +//
    +// Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted
    +// snapshots remain unencrypted, unless the Encrypted flag is specified during
    +// the snapshot copy operation. By default, encrypted snapshot copies use the
    +// default AWS Key Management Service (AWS KMS) customer master key (CMK); however,
    +// you can specify a non-default CMK with the KmsKeyId parameter.
    +//
    +// To copy an encrypted snapshot that has been shared from another account,
    +// you must have permissions for the CMK used to encrypt the snapshot.
    +//
    +// Snapshots created by the CopySnapshot action have an arbitrary volume ID
    +// that should not be used for any purpose.
    +//
    +// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CopySnapshot for usage and error information.
    +func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) {
    +	req, out := c.CopySnapshotRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateCustomerGateway = "CreateCustomerGateway"
    +
    +// CreateCustomerGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateCustomerGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateCustomerGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateCustomerGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateCustomerGatewayRequest method.
    +//    req, resp := client.CreateCustomerGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (req *request.Request, output *CreateCustomerGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateCustomerGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateCustomerGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CreateCustomerGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateCustomerGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Provides information to AWS about your VPN customer gateway device. The customer
    +// gateway is the appliance at your end of the VPN connection. (The device on
    +// the AWS side of the VPN connection is the virtual private gateway.) You must
    +// provide the Internet-routable IP address of the customer gateway's external
    +// interface. The IP address must be static and may be behind a device performing
    +// network address translation (NAT).
    +//
    +// For devices that use Border Gateway Protocol (BGP), you can also provide
    +// the device's BGP Autonomous System Number (ASN). You can use an existing
    +// ASN assigned to your network. If you don't have an ASN already, you can use
    +// a private ASN (in the 64512 - 65534 range).
    +//
    +// Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with
    +// the exception of 7224, which is reserved in the us-east-1 region, and 9059,
    +// which is reserved in the eu-west-1 region.
    +//
    +// For more information about VPN customer gateways, see Adding a Hardware Virtual
    +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// You cannot create more than one customer gateway with the same VPN type,
    +// IP address, and BGP ASN parameter values. If you run an identical request
    +// more than one time, the first request creates the customer gateway, and subsequent
    +// requests return information about the existing customer gateway. The subsequent
    +// requests do not create new customer gateway resources.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateCustomerGateway for usage and error information.
    +func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateCustomerGatewayOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateCustomerGatewayRequest.
    +	req, out := c.CreateCustomerGatewayRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateDhcpOptions = "CreateDhcpOptions"
    +
    +// CreateDhcpOptionsRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateDhcpOptions operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateDhcpOptions for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateDhcpOptions method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateDhcpOptionsRequest method.
    +//    req, resp := client.CreateDhcpOptionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *request.Request, output *CreateDhcpOptionsOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateDhcpOptions,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateDhcpOptionsInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateDhcpOptionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateDhcpOptions API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a set of DHCP options for your VPC. After creating the set, you must
    +// associate it with the VPC, causing all existing and new instances that you
    +// launch in the VPC to use this set of DHCP options. The following are the
    +// individual DHCP options you can specify. For more information about the options,
    +// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt).
    +//
    +//    * domain-name-servers - The IP addresses of up to four domain name servers,
    +//    or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS.
    +//    If specifying more than one domain name server, specify the IP addresses
    +//    in a single parameter, separated by commas. If you want your instance
    +//    to receive a custom DNS hostname as specified in domain-name, you must
    +//    set domain-name-servers to a custom DNS server.
    +//
    +//    * domain-name - If you're using AmazonProvidedDNS in "us-east-1", specify
    +//    "ec2.internal". If you're using AmazonProvidedDNS in another region, specify
    +//    "region.compute.internal" (for example, "ap-northeast-1.compute.internal").
    +//    Otherwise, specify a domain name (for example, "MyCompany.com"). This
    +//    value is used to complete unqualified DNS hostnames. Important: Some Linux
    +//    operating systems accept multiple domain names separated by spaces. However,
    +//    Windows and other Linux operating systems treat the value as a single
    +//    domain, which results in unexpected behavior. If your DHCP options set
    +//    is associated with a VPC that has instances with multiple operating systems,
    +//    specify only one domain name.
    +//
    +//    * ntp-servers - The IP addresses of up to four Network Time Protocol (NTP)
    +//    servers.
    +//
    +//    * netbios-name-servers - The IP addresses of up to four NetBIOS name servers.
    +//
    +//    * netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend
    +//    that you specify 2 (broadcast and multicast are not currently supported).
    +//    For more information about these node types, see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt).
    +//
    +// Your VPC automatically starts out with a set of DHCP options that includes
    +// only a DNS server that we provide (AmazonProvidedDNS). If you create a set
    +// of options, and if your VPC has an Internet gateway, make sure to set the
    +// domain-name-servers option either to AmazonProvidedDNS or to a domain name
    +// server of your choice. For more information about DHCP options, see DHCP
    +// Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateDhcpOptions for usage and error information.
    +func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptionsOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateDhcpOptionsRequest.
    +	req, out := c.CreateDhcpOptionsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateFlowLogs = "CreateFlowLogs"
    +
    +// CreateFlowLogsRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateFlowLogs operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateFlowLogs for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateFlowLogs method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateFlowLogsRequest method.
    +//    req, resp := client.CreateFlowLogsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Request, output *CreateFlowLogsOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateFlowLogs,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateFlowLogsInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateFlowLogsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateFlowLogs API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates one or more flow logs to capture IP traffic for a specific network
    +// interface, subnet, or VPC. Flow logs are delivered to a specified log group
    +// in Amazon CloudWatch Logs. If you specify a VPC or subnet in the request,
    +// a log stream is created in CloudWatch Logs for each network interface in
    +// the subnet or VPC. Log streams can include information about accepted and
    +// rejected traffic to a network interface. You can view the data in your log
    +// streams using Amazon CloudWatch Logs.
    +//
    +// In your request, you must also specify an IAM role that has permission to
    +// publish logs to CloudWatch Logs.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateFlowLogs for usage and error information.
    +func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateFlowLogsRequest.
    +	req, out := c.CreateFlowLogsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateImage = "CreateImage"
    +
    +// CreateImageRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateImage operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateImage for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateImage method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateImageRequest method.
    +//    req, resp := client.CreateImageRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, output *CreateImageOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateImage,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateImageInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateImageOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateImage API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that
    +// is either running or stopped.
    +//
    +// If you customized your instance with instance store volumes or EBS volumes
    +// in addition to the root device volume, the new AMI contains block device
    +// mapping information for those volumes. When you launch an instance from this
    +// new AMI, the instance automatically launches with those additional volumes.
    +//
    +// For more information, see Creating Amazon EBS-Backed Linux AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateImage for usage and error information.
    +func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateImageRequest.
    +	req, out := c.CreateImageRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateInstanceExportTask = "CreateInstanceExportTask"
    +
    +// CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateInstanceExportTask operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateInstanceExportTask for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateInstanceExportTask method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateInstanceExportTaskRequest method.
    +//    req, resp := client.CreateInstanceExportTaskRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInput) (req *request.Request, output *CreateInstanceExportTaskOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateInstanceExportTask,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateInstanceExportTaskInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateInstanceExportTaskOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateInstanceExportTask API operation for Amazon Elastic Compute Cloud.
    +//
    +// Exports a running or stopped instance to an S3 bucket.
    +//
    +// For information about the supported operating systems, image formats, and
    +// known limitations for the types of instances you can export, see Exporting
    +// an Instance as a VM Using VM Import/Export (http://docs.aws.amazon.com/vm-import/latest/userguide/vmexport.html)
    +// in the VM Import/Export User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateInstanceExportTask for usage and error information.
    +func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*CreateInstanceExportTaskOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateInstanceExportTaskRequest.
    +	req, out := c.CreateInstanceExportTaskRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateInternetGateway = "CreateInternetGateway"
    +
    +// CreateInternetGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateInternetGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateInternetGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateInternetGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateInternetGatewayRequest method.
    +//    req, resp := client.CreateInternetGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (req *request.Request, output *CreateInternetGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateInternetGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateInternetGatewayInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateInternetGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateInternetGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates an Internet gateway for use with a VPC. After creating the Internet
    +// gateway, you attach it to a VPC using AttachInternetGateway.
    +//
    +// For more information about your VPC and Internet gateway, see the Amazon
    +// Virtual Private Cloud User Guide (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateInternetGateway for usage and error information.
    +func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateInternetGatewayOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateInternetGatewayRequest.
    +	req, out := c.CreateInternetGatewayRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateKeyPair = "CreateKeyPair"
    +
    +// CreateKeyPairRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateKeyPair operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateKeyPair for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateKeyPair method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateKeyPairRequest method.
    +//    req, resp := client.CreateKeyPairRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateKeyPair,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateKeyPairInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateKeyPairOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateKeyPair API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores
    +// the public key and displays the private key for you to save to a file. The
    +// private key is returned as an unencrypted PEM encoded PKCS#8 private key.
    +// If a key with the specified name already exists, Amazon EC2 returns an error.
    +//
    +// You can have up to five thousand key pairs per region.
    +//
    +// The key pair returned to you is available only in the region in which you
    +// create it. To create a key pair that is available in all regions, use ImportKeyPair.
    +//
    +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateKeyPair for usage and error information.
    +func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateKeyPairRequest.
    +	req, out := c.CreateKeyPairRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateNatGateway = "CreateNatGateway"
    +
    +// CreateNatGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateNatGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateNatGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateNatGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateNatGatewayRequest method.
    +//    req, resp := client.CreateNatGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *request.Request, output *CreateNatGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateNatGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateNatGatewayInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateNatGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateNatGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a NAT gateway in the specified subnet. A NAT gateway can be used
    +// to enable instances in a private subnet to connect to the Internet. This
    +// action creates a network interface in the specified subnet with a private
    +// IP address from the IP address range of the subnet. For more information,
    +// see NAT Gateways (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateNatGateway for usage and error information.
    +func (c *EC2) CreateNatGateway(input *CreateNatGatewayInput) (*CreateNatGatewayOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateNatGatewayRequest.
    +	req, out := c.CreateNatGatewayRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateNetworkAcl = "CreateNetworkAcl"
    +
    +// CreateNetworkAclRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateNetworkAcl operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateNetworkAcl for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateNetworkAcl method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateNetworkAclRequest method.
    +//    req, resp := client.CreateNetworkAclRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *request.Request, output *CreateNetworkAclOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateNetworkAcl,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateNetworkAclInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateNetworkAclOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateNetworkAcl API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a network ACL in a VPC. Network ACLs provide an optional layer of
    +// security (in addition to security groups) for the instances in your VPC.
    +//
    +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateNetworkAcl for usage and error information.
    +func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateNetworkAclRequest.
    +	req, out := c.CreateNetworkAclRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateNetworkAclEntry = "CreateNetworkAclEntry"
    +
    +// CreateNetworkAclEntryRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateNetworkAclEntry operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateNetworkAclEntry for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateNetworkAclEntry method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateNetworkAclEntryRequest method.
    +//    req, resp := client.CreateNetworkAclEntryRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (req *request.Request, output *CreateNetworkAclEntryOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateNetworkAclEntry,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateNetworkAclEntryInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these statements).
    +	req = c.newRequest(op, input, output)
    +	// Unlike the sibling Create*Request methods, this operation swaps the
    +	// default ec2query unmarshaler for a discard-body handler — presumably
    +	// because the API returns no response payload; confirm against the EC2
    +	// service model before changing.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &CreateNetworkAclEntryOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateNetworkAclEntry API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates an entry (a rule) in a network ACL with the specified rule number.
    +// Each network ACL has a set of numbered ingress rules and a separate set of
    +// numbered egress rules. When determining whether a packet should be allowed
    +// in or out of a subnet associated with the ACL, we process the entries in
    +// the ACL according to the rule numbers, in ascending order. Each network ACL
    +// has a set of ingress rules and a separate set of egress rules.
    +//
    +// We recommend that you leave room between the rule numbers (for example, 100,
    +// 110, 120, ...), and not number them one right after the other (for example,
    +// 101, 102, 103, ...). This makes it easier to add a rule between existing
    +// ones without having to renumber the rules.
    +//
    +// After you add an entry, you can't modify it; you must either replace it,
    +// or create an entry and delete the old one.
    +//
    +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateNetworkAclEntry for usage and error information.
    +func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateNetworkAclEntryOutput, error) {
    +	// Synchronous convenience wrapper: build the request and send it; out is
    +	// the struct installed as req.Data by CreateNetworkAclEntryRequest.
    +	req, out := c.CreateNetworkAclEntryRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateNetworkInterface = "CreateNetworkInterface"
    +
    +// CreateNetworkInterfaceRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateNetworkInterface operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateNetworkInterface for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateNetworkInterface method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateNetworkInterfaceRequest method.
    +//    req, resp := client.CreateNetworkInterfaceRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) (req *request.Request, output *CreateNetworkInterfaceOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateNetworkInterface,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// A nil input is replaced with an empty struct so callers may pass nil.
    +	if input == nil {
    +		input = &CreateNetworkInterfaceInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the
    +	// request's unmarshal target is set afterwards via req.Data (generated
    +	// SDK pattern — do not reorder these three statements).
    +	req = c.newRequest(op, input, output)
    +	output = &CreateNetworkInterfaceOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateNetworkInterface API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a network interface in the specified subnet.
    +//
    +// For more information about network interfaces, see Elastic Network Interfaces
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) in the
    +// Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateNetworkInterface for usage and error information.
    +func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*CreateNetworkInterfaceOutput, error) {
    +	req, out := c.CreateNetworkInterfaceRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreatePlacementGroup = "CreatePlacementGroup"
    +
    +// CreatePlacementGroupRequest generates a "aws/request.Request" representing the
    +// client's request for the CreatePlacementGroup operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreatePlacementGroup for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreatePlacementGroup method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreatePlacementGroupRequest method.
    +//    req, resp := client.CreatePlacementGroupRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req *request.Request, output *CreatePlacementGroupOutput) {
    +	op := &request.Operation{
    +		Name:       opCreatePlacementGroup,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreatePlacementGroupInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &CreatePlacementGroupOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreatePlacementGroup API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a placement group that you launch cluster instances into. You must
    +// give the group a name that's unique within the scope of your account.
    +//
    +// For more information about placement groups and cluster instances, see Cluster
    +// Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreatePlacementGroup for usage and error information.
    +func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePlacementGroupOutput, error) {
    +	req, out := c.CreatePlacementGroupRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateReservedInstancesListing = "CreateReservedInstancesListing"
    +
    +// CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateReservedInstancesListing operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateReservedInstancesListing for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateReservedInstancesListing method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateReservedInstancesListingRequest method.
    +//    req, resp := client.CreateReservedInstancesListingRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstancesListingInput) (req *request.Request, output *CreateReservedInstancesListingOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateReservedInstancesListing,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateReservedInstancesListingInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CreateReservedInstancesListingOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateReservedInstancesListing API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in
    +// the Reserved Instance Marketplace. You can submit one Standard Reserved Instance
    +// listing at a time. To get a list of your Standard Reserved Instances, you
    +// can use the DescribeReservedInstances operation.
    +//
    +// The Reserved Instance Marketplace matches sellers who want to resell Standard
    +// Reserved Instance capacity that they no longer need with buyers who want
    +// to purchase additional capacity. Reserved Instances bought and sold through
    +// the Reserved Instance Marketplace work like any other Reserved Instances.
    +//
    +// To sell your Standard Reserved Instances, you must first register as a seller
    +// in the Reserved Instance Marketplace. After completing the registration process,
    +// you can create a Reserved Instance Marketplace listing of some or all of
    +// your Standard Reserved Instances, and specify the upfront price to receive
    +// for them. Your Standard Reserved Instance listings then become available
    +// for purchase. To view the details of your Standard Reserved Instance listing,
    +// you can use the DescribeReservedInstancesListings operation.
    +//
    +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateReservedInstancesListing for usage and error information.
    +func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListingInput) (*CreateReservedInstancesListingOutput, error) {
    +	req, out := c.CreateReservedInstancesListingRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateRoute = "CreateRoute"
    +
    +// CreateRouteRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateRoute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateRoute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateRoute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateRouteRequest method.
    +//    req, resp := client.CreateRouteRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, output *CreateRouteOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateRoute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateRouteInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CreateRouteOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateRoute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a route in a route table within a VPC.
    +//
    +// You must specify one of the following targets: Internet gateway or virtual
    +// private gateway, NAT instance, NAT gateway, VPC peering connection, or network
    +// interface.
    +//
    +// When determining how to route traffic, we use the route with the most specific
    +// match. For example, let's say the traffic is destined for 192.0.2.3, and
    +// the route table includes the following two routes:
    +//
    +//    * 192.0.2.0/24 (goes to some target A)
    +//
    +//    * 192.0.2.0/28 (goes to some target B)
    +//
    +// Both routes apply to the traffic destined for 192.0.2.3. However, the second
    +// route in the list covers a smaller number of IP addresses and is therefore
    +// more specific, so we use that route to determine where to target the traffic.
    +//
    +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateRoute for usage and error information.
    +func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) {
    +	req, out := c.CreateRouteRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateRouteTable = "CreateRouteTable"
    +
    +// CreateRouteTableRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateRouteTable operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateRouteTable for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateRouteTable method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateRouteTableRequest method.
    +//    req, resp := client.CreateRouteTableRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *request.Request, output *CreateRouteTableOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateRouteTable,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateRouteTableInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CreateRouteTableOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateRouteTable API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a route table for the specified VPC. After you create a route table,
    +// you can add routes and associate the table with a subnet.
    +//
    +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateRouteTable for usage and error information.
    +func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableOutput, error) {
    +	req, out := c.CreateRouteTableRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateSecurityGroup = "CreateSecurityGroup"
    +
    +// CreateSecurityGroupRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateSecurityGroup operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateSecurityGroup for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateSecurityGroup method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateSecurityGroupRequest method.
    +//    req, resp := client.CreateSecurityGroupRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *request.Request, output *CreateSecurityGroupOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateSecurityGroup,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateSecurityGroupInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CreateSecurityGroupOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateSecurityGroup API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a security group.
    +//
    +// A security group is for use with instances either in the EC2-Classic platform
    +// or in a specific VPC. For more information, see Amazon EC2 Security Groups
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
    +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your
    +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// EC2-Classic: You can have up to 500 security groups.
    +//
    +// EC2-VPC: You can create up to 500 security groups per VPC.
    +//
    +// When you create a security group, you specify a friendly name of your choice.
    +// You can have a security group for use in EC2-Classic with the same name as
    +// a security group for use in a VPC. However, you can't have two security groups
    +// for use in EC2-Classic with the same name or two security groups for use
    +// in a VPC with the same name.
    +//
    +// You have a default security group for use in EC2-Classic and a default security
    +// group for use in your VPC. If you don't specify a security group when you
    +// launch an instance, the instance is launched into the appropriate default
    +// security group. A default security group includes a default rule that grants
    +// instances unrestricted network access to each other.
    +//
    +// You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress,
    +// AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateSecurityGroup for usage and error information.
    +func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecurityGroupOutput, error) {
    +	req, out := c.CreateSecurityGroupRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateSnapshot = "CreateSnapshot"
    +
    +// CreateSnapshotRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateSnapshot operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateSnapshot for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateSnapshot method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateSnapshotRequest method.
    +//    req, resp := client.CreateSnapshotRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *Snapshot) {
    +	op := &request.Operation{
    +		Name:       opCreateSnapshot,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateSnapshotInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &Snapshot{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateSnapshot API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use
    +// snapshots for backups, to make copies of EBS volumes, and to save data before
    +// shutting down an instance.
    +//
    +// When a snapshot is created, any AWS Marketplace product codes that are associated
    +// with the source volume are propagated to the snapshot.
    +//
    +// You can take a snapshot of an attached volume that is in use. However, snapshots
    +// only capture data that has been written to your EBS volume at the time the
    +// snapshot command is issued; this may exclude any data that has been cached
    +// by any applications or the operating system. If you can pause any file systems
    +// on the volume long enough to take a snapshot, your snapshot should be complete.
    +// However, if you cannot pause all file writes to the volume, you should unmount
    +// the volume from within the instance, issue the snapshot command, and then
    +// remount the volume to ensure a consistent and complete snapshot. You may
    +// remount and use your volume while the snapshot status is pending.
    +//
    +// To create a snapshot for EBS volumes that serve as root devices, you should
    +// stop the instance before taking the snapshot.
    +//
    +// Snapshots that are taken from encrypted volumes are automatically encrypted.
    +// Volumes that are created from encrypted snapshots are also automatically
    +// encrypted. Your encrypted volumes and any associated snapshots always remain
    +// protected.
    +//
    +// For more information, see Amazon Elastic Block Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html)
    +// and Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateSnapshot for usage and error information.
    +func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) {
    +	req, out := c.CreateSnapshotRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription"
    +
    +// CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateSpotDatafeedSubscription operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateSpotDatafeedSubscription for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateSpotDatafeedSubscription method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateSpotDatafeedSubscriptionRequest method.
    +//    req, resp := client.CreateSpotDatafeedSubscriptionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSubscriptionInput) (req *request.Request, output *CreateSpotDatafeedSubscriptionOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateSpotDatafeedSubscription,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateSpotDatafeedSubscriptionInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CreateSpotDatafeedSubscriptionOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a data feed for Spot instances, enabling you to view Spot instance
    +// usage logs. You can create one data feed per AWS account. For more information,
    +// see Spot Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateSpotDatafeedSubscription for usage and error information.
    +func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscriptionInput) (*CreateSpotDatafeedSubscriptionOutput, error) {
    +	req, out := c.CreateSpotDatafeedSubscriptionRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateSubnet = "CreateSubnet"
    +
    +// CreateSubnetRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateSubnet operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateSubnet for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateSubnet method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateSubnetRequest method.
    +//    req, resp := client.CreateSubnetRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Request, output *CreateSubnetOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateSubnet,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateSubnetInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &CreateSubnetOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateSubnet API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a subnet in an existing VPC.
    +//
    +// When you create each subnet, you provide the VPC ID and the CIDR block you
    +// want for the subnet. After you create a subnet, you can't change its CIDR
    +// block. The subnet's CIDR block can be the same as the VPC's CIDR block (assuming
    +// you want only a single subnet in the VPC), or a subset of the VPC's CIDR
    +// block. If you create more than one subnet in a VPC, the subnets' CIDR blocks
    +// must not overlap. The smallest subnet (and VPC) you can create uses a /28
    +// netmask (16 IP addresses), and the largest uses a /16 netmask (65,536 IP
    +// addresses).
    +//
    +// AWS reserves both the first four and the last IP address in each subnet's
    +// CIDR block. They're not available for use.
    +//
    +// If you add more than one subnet to a VPC, they're set up in a star topology
    +// with a logical router in the middle.
    +//
    +// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP
    +// address doesn't change if you stop and restart the instance (unlike a similar
    +// instance launched outside a VPC, which gets a new IP address when restarted).
    +// It's therefore possible to have a subnet with no running instances (they're
    +// all stopped), but no remaining IP addresses available.
    +//
    +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateSubnet for usage and error information.
    +func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error) {
    +	req, out := c.CreateSubnetRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateTags = "CreateTags"
    +
    +// CreateTagsRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateTags operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateTags for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateTags method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateTagsRequest method.
    +//    req, resp := client.CreateTagsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateTags,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateTagsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &CreateTagsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateTags API operation for Amazon Elastic Compute Cloud.
    +//
    +// Adds or overwrites one or more tags for the specified Amazon EC2 resource
    +// or resources. Each resource can have a maximum of 50 tags. Each tag consists
    +// of a key and optional value. Tag keys must be unique per resource.
    +//
    +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
    +// in the Amazon Elastic Compute Cloud User Guide. For more information about
    +// creating IAM policies that control users' access to resources based on tags,
    +// see Supported Resource-Level Permissions for Amazon EC2 API Actions (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-iam-actions-resources.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateTags for usage and error information.
    +func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) {
    +	req, out := c.CreateTagsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateVolume = "CreateVolume"
    +
    +// CreateVolumeRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateVolume operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateVolume for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateVolume method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateVolumeRequest method.
    +//    req, resp := client.CreateVolumeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Request, output *Volume) {
    +	op := &request.Operation{
    +		Name:       opCreateVolume,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateVolumeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Allocate the output value now and attach it as the request's data target.
    +	output = &Volume{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateVolume API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates an EBS volume that can be attached to an instance in the same Availability
    +// Zone. The volume is created in the regional endpoint that you send the HTTP
    +// request to. For more information see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html).
    +//
    +// You can create a new empty volume or restore a volume from an EBS snapshot.
    +// Any AWS Marketplace product codes from the snapshot are propagated to the
    +// volume.
    +//
    +// You can create encrypted volumes with the Encrypted parameter. Encrypted
    +// volumes may only be attached to instances that support Amazon EBS encryption.
    +// Volumes that are created from encrypted snapshots are also automatically
    +// encrypted. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// For more information, see Creating or Restoring an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateVolume for usage and error information.
    +func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) {
    +	req, out := c.CreateVolumeRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateVpc = "CreateVpc"
    +
    +// CreateVpcRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateVpc operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateVpc for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateVpc method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateVpcRequest method.
    +//    req, resp := client.CreateVpcRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, output *CreateVpcOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateVpc,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateVpcInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Allocate the output value now and attach it as the request's data target.
    +	output = &CreateVpcOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateVpc API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a VPC with the specified CIDR block.
    +//
    +// The smallest VPC you can create uses a /28 netmask (16 IP addresses), and
    +// the largest uses a /16 netmask (65,536 IP addresses). To help you decide
    +// how big to make your VPC, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// By default, each instance you launch in the VPC has the default DHCP options,
    +// which includes only a default DNS server that we provide (AmazonProvidedDNS).
    +// For more information about DHCP options, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// You can specify the instance tenancy value for the VPC when you create it.
    +// You can't change this value for the VPC after you create it. For more information,
    +// see Dedicated Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateVpc for usage and error information.
    +func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) {
    +	req, out := c.CreateVpcRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateVpcEndpoint = "CreateVpcEndpoint"
    +
    +// CreateVpcEndpointRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateVpcEndpoint operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateVpcEndpoint for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateVpcEndpoint method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateVpcEndpointRequest method.
    +//    req, resp := client.CreateVpcEndpointRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *request.Request, output *CreateVpcEndpointOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateVpcEndpoint,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateVpcEndpointInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Allocate the output value now and attach it as the request's data target.
    +	output = &CreateVpcEndpointOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateVpcEndpoint API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a VPC endpoint for a specified AWS service. An endpoint enables you
    +// to create a private connection between your VPC and another AWS service in
    +// your account. You can specify an endpoint policy to attach to the endpoint
    +// that will control access to the service from your VPC. You can also specify
    +// the VPC route tables that use the endpoint.
    +//
    +// Currently, only endpoints to Amazon S3 are supported.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateVpcEndpoint for usage and error information.
    +func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpointOutput, error) {
    +	req, out := c.CreateVpcEndpointRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection"
    +
    +// CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateVpcPeeringConnection operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateVpcPeeringConnection for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateVpcPeeringConnection method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateVpcPeeringConnectionRequest method.
    +//    req, resp := client.CreateVpcPeeringConnectionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateVpcPeeringConnection,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateVpcPeeringConnectionInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Allocate the output value now and attach it as the request's data target.
    +	output = &CreateVpcPeeringConnectionOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateVpcPeeringConnection API operation for Amazon Elastic Compute Cloud.
    +//
    +// Requests a VPC peering connection between two VPCs: a requester VPC that
    +// you own and a peer VPC with which to create the connection. The peer VPC
    +// can belong to another AWS account. The requester VPC and peer VPC cannot
    +// have overlapping CIDR blocks.
    +//
    +// The owner of the peer VPC must accept the peering request to activate the
    +// peering connection. The VPC peering connection request expires after 7 days,
    +// after which it cannot be accepted or rejected.
    +//
    +// A CreateVpcPeeringConnection request between VPCs with overlapping CIDR blocks
    +// results in the VPC peering connection having a status of failed.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateVpcPeeringConnection for usage and error information.
    +func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) {
    +	req, out := c.CreateVpcPeeringConnectionRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateVpnConnection = "CreateVpnConnection"
    +
    +// CreateVpnConnectionRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateVpnConnection operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateVpnConnection for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateVpnConnection method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateVpnConnectionRequest method.
    +//    req, resp := client.CreateVpnConnectionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *request.Request, output *CreateVpnConnectionOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateVpnConnection,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateVpnConnectionInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Allocate the output value now and attach it as the request's data target.
    +	output = &CreateVpnConnectionOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateVpnConnection API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a VPN connection between an existing virtual private gateway and
    +// a VPN customer gateway. The only supported connection type is ipsec.1.
    +//
    +// The response includes information that you need to give to your network administrator
    +// to configure your customer gateway.
    +//
    +// We strongly recommend that you use HTTPS when calling this operation because
    +// the response contains sensitive cryptographic information for configuring
    +// your customer gateway.
    +//
    +// If you decide to shut down your VPN connection for any reason and later create
    +// a new VPN connection, you must reconfigure your customer gateway with the
    +// new information returned from this call.
    +//
    +// This is an idempotent operation. If you perform the operation more than once,
    +// Amazon EC2 doesn't return an error.
    +//
    +// For more information about VPN connections, see Adding a Hardware Virtual
    +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateVpnConnection for usage and error information.
    +func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnConnectionOutput, error) {
    +	req, out := c.CreateVpnConnectionRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute"
    +
    +// CreateVpnConnectionRouteRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateVpnConnectionRoute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateVpnConnectionRoute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateVpnConnectionRoute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateVpnConnectionRouteRequest method.
    +//    req, resp := client.CreateVpnConnectionRouteRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInput) (req *request.Request, output *CreateVpnConnectionRouteOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateVpnConnectionRoute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateVpnConnectionRouteInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the default ec2query unmarshaler for one that simply discards the
    +	// response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &CreateVpnConnectionRouteOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateVpnConnectionRoute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a static route associated with a VPN connection between an existing
    +// virtual private gateway and a VPN customer gateway. The static route allows
    +// traffic to be routed from the virtual private gateway to the VPN customer
    +// gateway.
    +//
    +// For more information about VPN connections, see Adding a Hardware Virtual
    +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateVpnConnectionRoute for usage and error information.
    +func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*CreateVpnConnectionRouteOutput, error) {
    +	req, out := c.CreateVpnConnectionRouteRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opCreateVpnGateway = "CreateVpnGateway"
    +
    +// CreateVpnGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the CreateVpnGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See CreateVpnGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the CreateVpnGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the CreateVpnGatewayRequest method.
    +//    req, resp := client.CreateVpnGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *request.Request, output *CreateVpnGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opCreateVpnGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &CreateVpnGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Allocate the output value now and attach it as the request's data target.
    +	output = &CreateVpnGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// CreateVpnGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a virtual private gateway. A virtual private gateway is the endpoint
    +// on the VPC side of your VPN connection. You can create a virtual private
    +// gateway before creating the VPC itself.
    +//
    +// For more information about virtual private gateways, see Adding a Hardware
    +// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation CreateVpnGateway for usage and error information.
    +func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayOutput, error) {
    +	req, out := c.CreateVpnGatewayRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteCustomerGateway = "DeleteCustomerGateway"
    +
    +// DeleteCustomerGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteCustomerGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteCustomerGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteCustomerGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteCustomerGatewayRequest method.
    +//    req, resp := client.DeleteCustomerGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (req *request.Request, output *DeleteCustomerGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteCustomerGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteCustomerGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the default ec2query unmarshaler for one that simply discards the
    +	// response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteCustomerGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteCustomerGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified customer gateway. You must delete the VPN connection
    +// before you can delete the customer gateway.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteCustomerGateway for usage and error information.
    +func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteCustomerGatewayOutput, error) {
    +	req, out := c.DeleteCustomerGatewayRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteDhcpOptions = "DeleteDhcpOptions"
    +
    +// DeleteDhcpOptionsRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteDhcpOptions operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteDhcpOptions for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteDhcpOptions method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteDhcpOptionsRequest method.
    +//    req, resp := client.DeleteDhcpOptionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *request.Request, output *DeleteDhcpOptionsOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteDhcpOptions,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteDhcpOptionsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the default ec2query unmarshaler for one that simply discards the
    +	// response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteDhcpOptionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteDhcpOptions API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified set of DHCP options. You must disassociate the set
    +// of DHCP options before you can delete it. You can disassociate the set of
    +// DHCP options by associating either a new set of options or the default set
    +// of options with the VPC.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteDhcpOptions for usage and error information.
    +func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptionsOutput, error) {
    +	req, out := c.DeleteDhcpOptionsRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteFlowLogs = "DeleteFlowLogs"
    +
    +// DeleteFlowLogsRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteFlowLogs operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteFlowLogs for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteFlowLogs method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteFlowLogsRequest method.
    +//    req, resp := client.DeleteFlowLogsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Request, output *DeleteFlowLogsOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteFlowLogs,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteFlowLogsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Allocate the output value now and attach it as the request's data target.
    +	output = &DeleteFlowLogsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteFlowLogs API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes one or more flow logs.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteFlowLogs for usage and error information.
    +func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, error) {
    +	req, out := c.DeleteFlowLogsRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteInternetGateway = "DeleteInternetGateway"
    +
    +// DeleteInternetGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteInternetGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteInternetGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteInternetGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteInternetGatewayRequest method.
    +//    req, resp := client.DeleteInternetGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (req *request.Request, output *DeleteInternetGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteInternetGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteInternetGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the default ec2query unmarshaler for one that simply discards the
    +	// response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteInternetGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteInternetGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified Internet gateway. You must detach the Internet gateway
    +// from the VPC before you can delete it.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteInternetGateway for usage and error information.
    +func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteInternetGatewayOutput, error) {
    +	req, out := c.DeleteInternetGatewayRequest(input)
    +	// Send executes the request synchronously; out is populated on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteKeyPair = "DeleteKeyPair"
    +
    +// DeleteKeyPairRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteKeyPair operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteKeyPair for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteKeyPair method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteKeyPairRequest method.
    +//    req, resp := client.DeleteKeyPairRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteKeyPair,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteKeyPairInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteKeyPairOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteKeyPair API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified key pair, by removing the public key from Amazon EC2.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteKeyPair for usage and error information.
    +func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) {
    +	req, out := c.DeleteKeyPairRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteNatGateway = "DeleteNatGateway"
    +
    +// DeleteNatGatewayRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteNatGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteNatGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteNatGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteNatGatewayRequest method.
    +//    req, resp := client.DeleteNatGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *request.Request, output *DeleteNatGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteNatGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteNatGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DeleteNatGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteNatGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified NAT gateway. Deleting a NAT gateway disassociates its
    +// Elastic IP address, but does not release the address from your account. Deleting
    +// a NAT gateway does not delete any NAT gateway routes in your route tables.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteNatGateway for usage and error information.
    +func (c *EC2) DeleteNatGateway(input *DeleteNatGatewayInput) (*DeleteNatGatewayOutput, error) {
    +	req, out := c.DeleteNatGatewayRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteNetworkAcl = "DeleteNetworkAcl"
    +
    +// DeleteNetworkAclRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteNetworkAcl operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteNetworkAcl for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteNetworkAcl method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteNetworkAclRequest method.
    +//    req, resp := client.DeleteNetworkAclRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *request.Request, output *DeleteNetworkAclOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteNetworkAcl,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteNetworkAclInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteNetworkAclOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteNetworkAcl API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified network ACL. You can't delete the ACL if it's associated
    +// with any subnets. You can't delete the default network ACL.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteNetworkAcl for usage and error information.
    +func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclOutput, error) {
    +	req, out := c.DeleteNetworkAclRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry"
    +
    +// DeleteNetworkAclEntryRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteNetworkAclEntry operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteNetworkAclEntry for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteNetworkAclEntry method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteNetworkAclEntryRequest method.
    +//    req, resp := client.DeleteNetworkAclEntryRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (req *request.Request, output *DeleteNetworkAclEntryOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteNetworkAclEntry,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteNetworkAclEntryInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteNetworkAclEntryOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteNetworkAclEntry API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified ingress or egress entry (rule) from the specified network
    +// ACL.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteNetworkAclEntry for usage and error information.
    +func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteNetworkAclEntryOutput, error) {
    +	req, out := c.DeleteNetworkAclEntryRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteNetworkInterface = "DeleteNetworkInterface"
    +
    +// DeleteNetworkInterfaceRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteNetworkInterface operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteNetworkInterface for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteNetworkInterface method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteNetworkInterfaceRequest method.
    +//    req, resp := client.DeleteNetworkInterfaceRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) (req *request.Request, output *DeleteNetworkInterfaceOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteNetworkInterface,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteNetworkInterfaceInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteNetworkInterfaceOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteNetworkInterface API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified network interface. You must detach the network interface
    +// before you can delete it.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteNetworkInterface for usage and error information.
    +func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*DeleteNetworkInterfaceOutput, error) {
    +	req, out := c.DeleteNetworkInterfaceRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeletePlacementGroup = "DeletePlacementGroup"
    +
    +// DeletePlacementGroupRequest generates an "aws/request.Request" representing the
    +// client's request for the DeletePlacementGroup operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeletePlacementGroup for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeletePlacementGroup method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeletePlacementGroupRequest method.
    +//    req, resp := client.DeletePlacementGroupRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req *request.Request, output *DeletePlacementGroupOutput) {
    +	op := &request.Operation{
    +		Name:       opDeletePlacementGroup,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeletePlacementGroupInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeletePlacementGroupOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeletePlacementGroup API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified placement group. You must terminate all instances in
    +// the placement group before you can delete the placement group. For more information
    +// about placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeletePlacementGroup for usage and error information.
    +func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePlacementGroupOutput, error) {
    +	req, out := c.DeletePlacementGroupRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteRoute = "DeleteRoute"
    +
    +// DeleteRouteRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteRoute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteRoute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteRoute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteRouteRequest method.
    +//    req, resp := client.DeleteRouteRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, output *DeleteRouteOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteRoute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteRouteInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteRouteOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteRoute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified route from the specified route table.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteRoute for usage and error information.
    +func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) {
    +	req, out := c.DeleteRouteRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteRouteTable = "DeleteRouteTable"
    +
    +// DeleteRouteTableRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteRouteTable operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteRouteTable for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteRouteTable method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteRouteTableRequest method.
    +//    req, resp := client.DeleteRouteTableRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *request.Request, output *DeleteRouteTableOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteRouteTable,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteRouteTableInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteRouteTableOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteRouteTable API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified route table. You must disassociate the route table
    +// from any subnets before you can delete it. You can't delete the main route
    +// table.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteRouteTable for usage and error information.
    +func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableOutput, error) {
    +	req, out := c.DeleteRouteTableRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteSecurityGroup = "DeleteSecurityGroup"
    +
    +// DeleteSecurityGroupRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteSecurityGroup operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteSecurityGroup for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteSecurityGroup method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteSecurityGroupRequest method.
    +//    req, resp := client.DeleteSecurityGroupRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req *request.Request, output *DeleteSecurityGroupOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteSecurityGroup,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteSecurityGroupInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteSecurityGroupOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteSecurityGroup API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes a security group.
    +//
    +// If you attempt to delete a security group that is associated with an instance,
    +// or is referenced by another security group, the operation fails with InvalidGroup.InUse
    +// in EC2-Classic or DependencyViolation in EC2-VPC.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteSecurityGroup for usage and error information.
    +func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecurityGroupOutput, error) {
    +	req, out := c.DeleteSecurityGroupRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteSnapshot = "DeleteSnapshot"
    +
    +// DeleteSnapshotRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteSnapshot operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteSnapshot for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteSnapshot method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteSnapshotRequest method.
    +//    req, resp := client.DeleteSnapshotRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteSnapshot,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteSnapshotInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteSnapshotOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteSnapshot API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified snapshot.
    +//
    +// When you make periodic snapshots of a volume, the snapshots are incremental,
    +// and only the blocks on the device that have changed since your last snapshot
    +// are saved in the new snapshot. When you delete a snapshot, only the data
    +// not needed for any other snapshot is removed. So regardless of which prior
    +// snapshots have been deleted, all active snapshots will have access to all
    +// the information needed to restore the volume.
    +//
    +// You cannot delete a snapshot of the root device of an EBS volume used by
    +// a registered AMI. You must first deregister the AMI before you can delete
    +// the snapshot.
    +//
    +// For more information, see Deleting an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteSnapshot for usage and error information.
    +func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) {
    +	req, out := c.DeleteSnapshotRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription"
    +
    +// DeleteSpotDatafeedSubscriptionRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteSpotDatafeedSubscription operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteSpotDatafeedSubscription for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteSpotDatafeedSubscription method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteSpotDatafeedSubscriptionRequest method.
    +//    req, resp := client.DeleteSpotDatafeedSubscriptionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSubscriptionInput) (req *request.Request, output *DeleteSpotDatafeedSubscriptionOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteSpotDatafeedSubscription,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteSpotDatafeedSubscriptionInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteSpotDatafeedSubscriptionOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the data feed for Spot Instances.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteSpotDatafeedSubscription for usage and error information.
    +func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscriptionInput) (*DeleteSpotDatafeedSubscriptionOutput, error) {
    +	req, out := c.DeleteSpotDatafeedSubscriptionRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteSubnet = "DeleteSubnet"
    +
    +// DeleteSubnetRequest generates an "aws/request.Request" representing the
    +// client's request for the DeleteSubnet operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DeleteSubnet for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DeleteSubnet method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DeleteSubnetRequest method.
    +//    req, resp := client.DeleteSubnetRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Request, output *DeleteSubnetOutput) {
    +	op := &request.Operation{
    +		Name:       opDeleteSubnet,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DeleteSubnetInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteSubnetOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DeleteSubnet API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified subnet. You must terminate all running instances in
    +// the subnet before you can delete the subnet.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteSubnet for usage and error information.
    +func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error) {
    +	req, out := c.DeleteSubnetRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDeleteTags = "DeleteTags"
    +
    +// DeleteTagsRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteTags operation. The second return value
    +// captures the response data once the request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteTags directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteTagsRequest method.
    +//    req, resp := client.DeleteTagsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) {
    +	if input == nil {
    +		input = &DeleteTagsInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteTags,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// DeleteTags carries no payload in its response: swap the default EC2
    +	// query unmarshaler for the discard-body handler.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteTagsOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteTags API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified set of tags from the specified set of resources,
    +// and is designed to follow a DescribeTags request.
    +//
    +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteTags for usage and error information.
    +func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) {
    +	req, out := c.DeleteTagsRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeleteVolume = "DeleteVolume"
    +
    +// DeleteVolumeRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteVolume operation. The second return value
    +// captures the response data once the request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteVolume directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteVolumeRequest method.
    +//    req, resp := client.DeleteVolumeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) {
    +	if input == nil {
    +		input = &DeleteVolumeInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteVolume,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// DeleteVolume carries no payload in its response: swap the default EC2
    +	// query unmarshaler for the discard-body handler.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteVolumeOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteVolume API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified EBS volume. The volume must be in the available state,
    +// i.e. not attached to an instance, and may remain in the deleting state for
    +// several minutes.
    +//
    +// For more information, see Deleting an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteVolume for usage and error information.
    +func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) {
    +	req, out := c.DeleteVolumeRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeleteVpc = "DeleteVpc"
    +
    +// DeleteVpcRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteVpc operation. The second return value
    +// captures the response data once the request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteVpc directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteVpcRequest method.
    +//    req, resp := client.DeleteVpcRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, output *DeleteVpcOutput) {
    +	if input == nil {
    +		input = &DeleteVpcInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteVpc,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// DeleteVpc carries no payload in its response: swap the default EC2
    +	// query unmarshaler for the discard-body handler.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteVpcOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteVpc API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified VPC. All gateways and resources associated with the
    +// VPC must be detached or deleted first: for example, terminate all instances
    +// running in the VPC, delete all security groups associated with the VPC
    +// (except the default one), delete all route tables associated with the VPC
    +// (except the default one), and so on.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteVpc for usage and error information.
    +func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) {
    +	req, out := c.DeleteVpcRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeleteVpcEndpoints = "DeleteVpcEndpoints"
    +
    +// DeleteVpcEndpointsRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteVpcEndpoints operation. The second return
    +// value captures the response data once the request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteVpcEndpoints directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteVpcEndpointsRequest method.
    +//    req, resp := client.DeleteVpcEndpointsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *request.Request, output *DeleteVpcEndpointsOutput) {
    +	if input == nil {
    +		input = &DeleteVpcEndpointsInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteVpcEndpoints,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DeleteVpcEndpointsOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteVpcEndpoints API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes one or more specified VPC endpoints. Deleting an endpoint also
    +// deletes the endpoint routes in the route tables that were associated with
    +// the endpoint.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteVpcEndpoints for usage and error information.
    +func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndpointsOutput, error) {
    +	req, out := c.DeleteVpcEndpointsRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection"
    +
    +// DeleteVpcPeeringConnectionRequest generates a "aws/request.Request"
    +// representing the client's request for the DeleteVpcPeeringConnection
    +// operation. The second return value captures the response data once the
    +// request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteVpcPeeringConnection directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteVpcPeeringConnectionRequest method.
    +//    req, resp := client.DeleteVpcPeeringConnectionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) {
    +	if input == nil {
    +		input = &DeleteVpcPeeringConnectionInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteVpcPeeringConnection,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DeleteVpcPeeringConnectionOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteVpcPeeringConnection API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes a VPC peering connection. When the connection is in the active
    +// state, either the owner of the requester VPC or the owner of the peer VPC
    +// can delete it; a connection in the pending-acceptance state can be deleted
    +// only by the owner of the requester VPC.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteVpcPeeringConnection for usage and error information.
    +func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) {
    +	req, out := c.DeleteVpcPeeringConnectionRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeleteVpnConnection = "DeleteVpnConnection"
    +
    +// DeleteVpnConnectionRequest generates a "aws/request.Request" representing
    +// the client's request for the DeleteVpnConnection operation. The second
    +// return value captures the response data once the request's "Send" method
    +// is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteVpnConnection directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteVpnConnectionRequest method.
    +//    req, resp := client.DeleteVpnConnectionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req *request.Request, output *DeleteVpnConnectionOutput) {
    +	if input == nil {
    +		input = &DeleteVpnConnectionInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteVpnConnection,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// DeleteVpnConnection carries no payload in its response: swap the default
    +	// EC2 query unmarshaler for the discard-body handler.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteVpnConnectionOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteVpnConnection API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified VPN connection.
    +//
    +// If you're deleting the VPC and its associated components, we recommend
    +// detaching the virtual private gateway from the VPC and deleting the VPC
    +// before deleting the VPN connection. If you believe the tunnel credentials
    +// for your VPN connection have been compromised, you can delete the VPN
    +// connection and create a new one with new keys, without needing to delete
    +// the VPC or virtual private gateway. When you create a new VPN connection,
    +// you must reconfigure the customer gateway using the new configuration
    +// information returned with the new VPN connection ID.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteVpnConnection for usage and error information.
    +func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnConnectionOutput, error) {
    +	req, out := c.DeleteVpnConnectionRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute"
    +
    +// DeleteVpnConnectionRouteRequest generates a "aws/request.Request"
    +// representing the client's request for the DeleteVpnConnectionRoute
    +// operation. The second return value captures the response data once the
    +// request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteVpnConnectionRoute directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteVpnConnectionRouteRequest method.
    +//    req, resp := client.DeleteVpnConnectionRouteRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInput) (req *request.Request, output *DeleteVpnConnectionRouteOutput) {
    +	if input == nil {
    +		input = &DeleteVpnConnectionRouteInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteVpnConnectionRoute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// DeleteVpnConnectionRoute carries no payload in its response: swap the
    +	// default EC2 query unmarshaler for the discard-body handler.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteVpnConnectionRouteOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteVpnConnectionRoute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified static route associated with a VPN connection between
    +// an existing virtual private gateway and a VPN customer gateway. The static
    +// route allows traffic to be routed from the virtual private gateway to the
    +// VPN customer gateway.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteVpnConnectionRoute for usage and error information.
    +func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*DeleteVpnConnectionRouteOutput, error) {
    +	req, out := c.DeleteVpnConnectionRouteRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeleteVpnGateway = "DeleteVpnGateway"
    +
    +// DeleteVpnGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the DeleteVpnGateway operation. The second return
    +// value captures the response data once the request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeleteVpnGateway directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeleteVpnGatewayRequest method.
    +//    req, resp := client.DeleteVpnGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *request.Request, output *DeleteVpnGatewayOutput) {
    +	if input == nil {
    +		input = &DeleteVpnGatewayInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeleteVpnGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// DeleteVpnGateway carries no payload in its response: swap the default
    +	// EC2 query unmarshaler for the discard-body handler.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeleteVpnGatewayOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeleteVpnGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deletes the specified virtual private gateway. We recommend that before
    +// deleting a virtual private gateway, you detach it from the VPC and delete
    +// the VPN connection. Note that you don't need to delete the virtual private
    +// gateway if you plan to delete and recreate the VPN connection between your
    +// VPC and your network.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeleteVpnGateway for usage and error information.
    +func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayOutput, error) {
    +	req, out := c.DeleteVpnGatewayRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDeregisterImage = "DeregisterImage"
    +
    +// DeregisterImageRequest generates a "aws/request.Request" representing the
    +// client's request for the DeregisterImage operation. The second return
    +// value captures the response data once the request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DeregisterImage directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DeregisterImageRequest method.
    +//    req, resp := client.DeregisterImageRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.Request, output *DeregisterImageOutput) {
    +	if input == nil {
    +		input = &DeregisterImageInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDeregisterImage,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// DeregisterImage carries no payload in its response: swap the default
    +	// EC2 query unmarshaler for the discard-body handler.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DeregisterImageOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DeregisterImage API operation for Amazon Elastic Compute Cloud.
    +//
    +// Deregisters the specified AMI. A deregistered AMI can't be used to launch
    +// new instances.
    +//
    +// This command does not delete the AMI.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DeregisterImage for usage and error information.
    +func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutput, error) {
    +	req, out := c.DeregisterImageRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDescribeAccountAttributes = "DescribeAccountAttributes"
    +
    +// DescribeAccountAttributesRequest generates a "aws/request.Request"
    +// representing the client's request for the DescribeAccountAttributes
    +// operation. The second return value captures the response data once the
    +// request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DescribeAccountAttributes directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DescribeAccountAttributesRequest method.
    +//    req, resp := client.DescribeAccountAttributesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) {
    +	if input == nil {
    +		input = &DescribeAccountAttributesInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDescribeAccountAttributes,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeAccountAttributesOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DescribeAccountAttributes API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes attributes of your AWS account. The supported account attributes
    +// are:
    +//
    +//    * supported-platforms: Indicates whether your account can launch instances
    +//    into EC2-Classic and EC2-VPC, or only into EC2-VPC.
    +//
    +//    * default-vpc: The ID of the default VPC for your account, or none.
    +//
    +//    * max-instances: The maximum number of On-Demand instances that you can
    +//    run.
    +//
    +//    * vpc-max-security-groups-per-interface: The maximum number of security
    +//    groups that you can assign to a network interface.
    +//
    +//    * max-elastic-ips: The maximum number of Elastic IP addresses that you
    +//    can allocate for use with EC2-Classic.
    +//
    +//    * vpc-max-elastic-ips: The maximum number of Elastic IP addresses that
    +//    you can allocate for use with EC2-VPC.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeAccountAttributes for usage and error information.
    +func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) {
    +	req, out := c.DescribeAccountAttributesRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDescribeAddresses = "DescribeAddresses"
    +
    +// DescribeAddressesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeAddresses operation. The second return
    +// value captures the response data once the request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DescribeAddresses directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DescribeAddressesRequest method.
    +//    req, resp := client.DescribeAddressesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) {
    +	if input == nil {
    +		input = &DescribeAddressesInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDescribeAddresses,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeAddressesOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DescribeAddresses API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your Elastic IP addresses.
    +//
    +// An Elastic IP address is for use in either the EC2-Classic platform or in
    +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Service API and SDK failures are returned as awserr.Error; use runtime type
    +// assertions with its Code and Message methods to get detailed information
    +// about the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeAddresses for usage and error information.
    +func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) {
    +	req, out := c.DescribeAddressesRequest(input)
    +	return out, req.Send()
    +}
    +
    +const opDescribeAvailabilityZones = "DescribeAvailabilityZones"
    +
    +// DescribeAvailabilityZonesRequest generates a "aws/request.Request"
    +// representing the client's request for the DescribeAvailabilityZones
    +// operation. The second return value captures the response data once the
    +// request's "Send" method is called.
    +//
    +// Use this constructor when you need to inject custom handlers into the
    +// request lifecycle or inspect the request before it is sent. If you only
    +// need the service response, call DescribeAvailabilityZones directly instead.
    +//
    +// Note: the returned request does nothing until its "Send" method is called.
    +//
    +//    // Example sending a request using the DescribeAvailabilityZonesRequest method.
    +//    req, resp := client.DescribeAvailabilityZonesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesInput) (req *request.Request, output *DescribeAvailabilityZonesOutput) {
    +	if input == nil {
    +		input = &DescribeAvailabilityZonesInput{}
    +	}
    +
    +	op := &request.Operation{
    +		Name:       opDescribeAvailabilityZones,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeAvailabilityZonesOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// DescribeAvailabilityZones API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of the Availability Zones that are available to you.
    +// The results include zones only for the region you're currently using. If
    +// there is an event impacting an Availability Zone, you can use this request
    +// to view the state and any provided message for that Availability Zone.
    +//
    +// For more information, see Regions and Availability Zones (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeAvailabilityZones for usage and error information.
    +func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) (*DescribeAvailabilityZonesOutput, error) {
    +	req, out := c.DescribeAvailabilityZonesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeBundleTasks = "DescribeBundleTasks"
    +
    +// DescribeBundleTasksRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeBundleTasks operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeBundleTasks for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeBundleTasks method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeBundleTasksRequest method.
    +//    req, resp := client.DescribeBundleTasksRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req *request.Request, output *DescribeBundleTasksOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeBundleTasks,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeBundleTasksInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeBundleTasksOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeBundleTasks API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your bundling tasks.
    +//
    +// Completed bundle tasks are listed for only a limited time. If your bundle
    +// task is no longer in the list, you can still register an AMI from it. Just
    +// use RegisterImage with the Amazon S3 bucket name and image manifest name
    +// you provided to the bundle task.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeBundleTasks for usage and error information.
    +func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBundleTasksOutput, error) {
    +	req, out := c.DescribeBundleTasksRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances"
    +
    +// DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeClassicLinkInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeClassicLinkInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeClassicLinkInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeClassicLinkInstancesRequest method.
    +//    req, resp := client.DescribeClassicLinkInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInstancesInput) (req *request.Request, output *DescribeClassicLinkInstancesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeClassicLinkInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeClassicLinkInstancesInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeClassicLinkInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeClassicLinkInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your linked EC2-Classic instances. This request
    +// only returns information about EC2-Classic instances linked to a VPC through
    +// ClassicLink; you cannot use this request to return information about other
    +// instances.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeClassicLinkInstances for usage and error information.
    +func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesInput) (*DescribeClassicLinkInstancesOutput, error) {
    +	req, out := c.DescribeClassicLinkInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeConversionTasks = "DescribeConversionTasks"
    +
    +// DescribeConversionTasksRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeConversionTasks operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeConversionTasks for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeConversionTasks method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeConversionTasksRequest method.
    +//    req, resp := client.DescribeConversionTasksRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput) (req *request.Request, output *DescribeConversionTasksOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeConversionTasks,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeConversionTasksInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeConversionTasksOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeConversionTasks API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your conversion tasks. For more information, see
    +// the VM Import/Export User Guide (http://docs.aws.amazon.com/vm-import/latest/userguide/).
    +//
    +// For information about the import manifest referenced by this API action,
    +// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeConversionTasks for usage and error information.
    +func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*DescribeConversionTasksOutput, error) {
    +	req, out := c.DescribeConversionTasksRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeCustomerGateways = "DescribeCustomerGateways"
    +
    +// DescribeCustomerGatewaysRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeCustomerGateways operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeCustomerGateways for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeCustomerGateways method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeCustomerGatewaysRequest method.
    +//    req, resp := client.DescribeCustomerGatewaysRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInput) (req *request.Request, output *DescribeCustomerGatewaysOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeCustomerGateways,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeCustomerGatewaysInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeCustomerGatewaysOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeCustomerGateways API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your VPN customer gateways.
    +//
    +// For more information about VPN customer gateways, see Adding a Hardware Virtual
    +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeCustomerGateways for usage and error information.
    +func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*DescribeCustomerGatewaysOutput, error) {
    +	req, out := c.DescribeCustomerGatewaysRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeDhcpOptions = "DescribeDhcpOptions"
    +
    +// DescribeDhcpOptionsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeDhcpOptions operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeDhcpOptions for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeDhcpOptions method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeDhcpOptionsRequest method.
    +//    req, resp := client.DescribeDhcpOptionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *request.Request, output *DescribeDhcpOptionsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeDhcpOptions,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeDhcpOptionsInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeDhcpOptionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeDhcpOptions API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your DHCP options sets.
    +//
    +// For more information about DHCP options sets, see DHCP Options Sets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeDhcpOptions for usage and error information.
    +func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhcpOptionsOutput, error) {
    +	req, out := c.DescribeDhcpOptionsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeExportTasks = "DescribeExportTasks"
    +
    +// DescribeExportTasksRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeExportTasks operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeExportTasks for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeExportTasks method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeExportTasksRequest method.
    +//    req, resp := client.DescribeExportTasksRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeExportTasks,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeExportTasksInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeExportTasksOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeExportTasks API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your export tasks.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeExportTasks for usage and error information.
    +func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) {
    +	req, out := c.DescribeExportTasksRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeFlowLogs = "DescribeFlowLogs"
    +
    +// DescribeFlowLogsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeFlowLogs operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeFlowLogs for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeFlowLogs method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeFlowLogsRequest method.
    +//    req, resp := client.DescribeFlowLogsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *request.Request, output *DescribeFlowLogsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeFlowLogs,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeFlowLogsInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeFlowLogsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeFlowLogs API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more flow logs. To view the information in your flow logs
    +// (the log streams for the network interfaces), you must use the CloudWatch
    +// Logs console or the CloudWatch Logs API.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeFlowLogs for usage and error information.
    +func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsOutput, error) {
    +	req, out := c.DescribeFlowLogsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeHostReservationOfferings = "DescribeHostReservationOfferings"
    +
    +// DescribeHostReservationOfferingsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeHostReservationOfferings operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeHostReservationOfferings for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeHostReservationOfferings method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeHostReservationOfferingsRequest method.
    +//    req, resp := client.DescribeHostReservationOfferingsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeHostReservationOfferingsRequest(input *DescribeHostReservationOfferingsInput) (req *request.Request, output *DescribeHostReservationOfferingsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeHostReservationOfferings,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeHostReservationOfferingsInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeHostReservationOfferingsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeHostReservationOfferings API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the Dedicated Host Reservations that are available to purchase.
    +//
    +// The results describe all the Dedicated Host Reservation offerings, including
    +// offerings that may not match the instance family and region of your Dedicated
    +// Hosts. When purchasing an offering, ensure that the instance family and
    +// region of the offering matches that of the Dedicated Host/s it will be associated
    +// with. For an overview of supported instance types, see Dedicated Hosts Overview
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeHostReservationOfferings for usage and error information.
    +func (c *EC2) DescribeHostReservationOfferings(input *DescribeHostReservationOfferingsInput) (*DescribeHostReservationOfferingsOutput, error) {
    +	req, out := c.DescribeHostReservationOfferingsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeHostReservations = "DescribeHostReservations"
    +
    +// DescribeHostReservationsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeHostReservations operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeHostReservations for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeHostReservations method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeHostReservationsRequest method.
    +//    req, resp := client.DescribeHostReservationsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeHostReservationsRequest(input *DescribeHostReservationsInput) (req *request.Request, output *DescribeHostReservationsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeHostReservations,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeHostReservationsInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeHostReservationsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeHostReservations API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes Dedicated Host Reservations which are associated with Dedicated
    +// Hosts in your account.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeHostReservations for usage and error information.
    +func (c *EC2) DescribeHostReservations(input *DescribeHostReservationsInput) (*DescribeHostReservationsOutput, error) {
    +	req, out := c.DescribeHostReservationsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeHosts = "DescribeHosts"
    +
    +// DescribeHostsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeHosts operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeHosts for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeHosts method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeHostsRequest method.
    +//    req, resp := client.DescribeHostsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Request, output *DescribeHostsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeHosts,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeHostsInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeHostsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeHosts API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your Dedicated Hosts.
    +//
    +// The results describe only the Dedicated Hosts in the region you're currently
    +// using. All listed instances consume capacity on your Dedicated Host. Dedicated
    +// Hosts that have recently been released will be listed with the state released.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeHosts for usage and error information.
    +func (c *EC2) DescribeHosts(input *DescribeHostsInput) (*DescribeHostsOutput, error) {
    +	req, out := c.DescribeHostsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeIdFormat = "DescribeIdFormat"
    +
    +// DescribeIdFormatRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeIdFormat operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeIdFormat for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeIdFormat method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeIdFormatRequest method.
    +//    req, resp := client.DescribeIdFormatRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *request.Request, output *DescribeIdFormatOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeIdFormat,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeIdFormatInput{}
    +	}
    +
    +	// NOTE(review): newRequest is called with a nil output; req.Data is
    +	// re-pointed at the freshly allocated output struct just below. This
    +	// ordering is the generated-code convention of this SDK version.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeIdFormatOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeIdFormat API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the ID format settings for your resources on a per-region basis,
    +// for example, to view which resource types are enabled for longer IDs. This
    +// request only returns information about resource types whose ID formats can
    +// be modified; it does not return information about other resource types.
    +//
    +// The following resource types support longer IDs: instance | reservation |
    +// snapshot | volume.
    +//
    +// These settings apply to the IAM user who makes the request; they do not apply
    +// to the entire AWS account. By default, an IAM user defaults to the same settings
    +// as the root user, unless they explicitly override the settings by running
    +// the ModifyIdFormat command. Resources created with longer IDs are visible
    +// to all IAM users, regardless of these settings and provided that they have
    +// permission to use the relevant Describe command for the resource type.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeIdFormat for usage and error information.
    +func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatOutput, error) {
    +	req, out := c.DescribeIdFormatRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeIdentityIdFormat = "DescribeIdentityIdFormat"
    +
    +// DescribeIdentityIdFormatRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeIdentityIdFormat operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeIdentityIdFormat for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeIdentityIdFormat method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeIdentityIdFormatRequest method.
    +//    req, resp := client.DescribeIdentityIdFormatRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeIdentityIdFormatRequest(input *DescribeIdentityIdFormatInput) (req *request.Request, output *DescribeIdentityIdFormatOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeIdentityIdFormat,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeIdentityIdFormatInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeIdentityIdFormatOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeIdentityIdFormat API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the ID format settings for resources for the specified IAM user,
    +// IAM role, or root user. For example, you can view the resource types that
    +// are enabled for longer IDs. This request only returns information about resource
    +// types whose ID formats can be modified; it does not return information about
    +// other resource types. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// The following resource types support longer IDs: instance | reservation |
    +// snapshot | volume.
    +//
    +// These settings apply to the principal specified in the request. They do not
    +// apply to the principal that makes the request.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeIdentityIdFormat for usage and error information.
    +func (c *EC2) DescribeIdentityIdFormat(input *DescribeIdentityIdFormatInput) (*DescribeIdentityIdFormatOutput, error) {
    +	req, out := c.DescribeIdentityIdFormatRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeImageAttribute = "DescribeImageAttribute"
    +
    +// DescribeImageAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeImageAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeImageAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeImageAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeImageAttributeRequest method.
    +//    req, resp := client.DescribeImageAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) (req *request.Request, output *DescribeImageAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeImageAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeImageAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeImageAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeImageAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the specified attribute of the specified AMI. You can specify only
    +// one attribute at a time.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeImageAttribute for usage and error information.
    +func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*DescribeImageAttributeOutput, error) {
    +	req, out := c.DescribeImageAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeImages = "DescribeImages"
    +
    +// DescribeImagesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeImages operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeImages for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeImages method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeImagesRequest method.
    +//    req, resp := client.DescribeImagesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeImages,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeImagesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeImagesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeImages API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of the images (AMIs, AKIs, and ARIs) available to you.
    +// Images available to you include public images, private images that you own,
    +// and private images owned by other AWS accounts but for which you have explicit
    +// launch permissions.
    +//
    +// Deregistered images are included in the returned results for an unspecified
    +// interval after deregistration.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeImages for usage and error information.
    +func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) {
    +	req, out := c.DescribeImagesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeImportImageTasks = "DescribeImportImageTasks"
    +
    +// DescribeImportImageTasksRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeImportImageTasks operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeImportImageTasks for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeImportImageTasks method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeImportImageTasksRequest method.
    +//    req, resp := client.DescribeImportImageTasksRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInput) (req *request.Request, output *DescribeImportImageTasksOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeImportImageTasks,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeImportImageTasksInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeImportImageTasksOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeImportImageTasks API operation for Amazon Elastic Compute Cloud.
    +//
    +// Displays details about an import virtual machine or import snapshot tasks
    +// that are already created.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeImportImageTasks for usage and error information.
    +func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*DescribeImportImageTasksOutput, error) {
    +	req, out := c.DescribeImportImageTasksRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks"
    +
    +// DescribeImportSnapshotTasksRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeImportSnapshotTasks operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeImportSnapshotTasks for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeImportSnapshotTasks method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeImportSnapshotTasksRequest method.
    +//    req, resp := client.DescribeImportSnapshotTasksRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTasksInput) (req *request.Request, output *DescribeImportSnapshotTasksOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeImportSnapshotTasks,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeImportSnapshotTasksInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeImportSnapshotTasksOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeImportSnapshotTasks API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes your import snapshot tasks.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeImportSnapshotTasks for usage and error information.
    +func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInput) (*DescribeImportSnapshotTasksOutput, error) {
    +	req, out := c.DescribeImportSnapshotTasksRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeInstanceAttribute = "DescribeInstanceAttribute"
    +
    +// DescribeInstanceAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeInstanceAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeInstanceAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeInstanceAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeInstanceAttributeRequest method.
    +//    req, resp := client.DescribeInstanceAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeInput) (req *request.Request, output *DescribeInstanceAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeInstanceAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeInstanceAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeInstanceAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeInstanceAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the specified attribute of the specified instance. You can specify
    +// only one attribute at a time. Valid attribute values are: instanceType |
    +// kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior
    +// | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck |
    +// groupSet | ebsOptimized | sriovNetSupport
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeInstanceAttribute for usage and error information.
    +func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) (*DescribeInstanceAttributeOutput, error) {
    +	req, out := c.DescribeInstanceAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeInstanceStatus = "DescribeInstanceStatus"
    +
    +// DescribeInstanceStatusRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeInstanceStatus operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeInstanceStatus for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeInstanceStatus method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeInstanceStatusRequest method.
    +//    req, resp := client.DescribeInstanceStatusRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) (req *request.Request, output *DescribeInstanceStatusOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeInstanceStatus,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +		Paginator: &request.Paginator{
    +			InputTokens:     []string{"NextToken"},
    +			OutputTokens:    []string{"NextToken"},
    +			LimitToken:      "MaxResults",
    +			TruncationToken: "",
    +		},
    +	}
    +
    +	if input == nil {
    +		input = &DescribeInstanceStatusInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeInstanceStatusOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeInstanceStatus API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the status of one or more instances. By default, only running instances
    +// are described, unless specified otherwise.
    +//
    +// Instance status includes the following components:
    +//
    +//    * Status checks - Amazon EC2 performs status checks on running EC2 instances
    +//    to identify hardware and software issues. For more information, see Status
    +//    Checks for Your Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html)
    +//    and Troubleshooting Instances with Failed Status Checks (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html)
    +//    in the Amazon Elastic Compute Cloud User Guide.
    +//
    +//    * Scheduled events - Amazon EC2 can schedule events (such as reboot, stop,
    +//    or terminate) for your instances related to hardware issues, software
    +//    updates, or system maintenance. For more information, see Scheduled Events
    +//    for Your Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html)
    +//    in the Amazon Elastic Compute Cloud User Guide.
    +//
    +//    * Instance state - You can manage your instances from the moment you launch
    +//    them through their termination. For more information, see Instance Lifecycle
    +//    (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
    +//    in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeInstanceStatus for usage and error information.
    +func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*DescribeInstanceStatusOutput, error) {
    +	req, out := c.DescribeInstanceStatusRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// DescribeInstanceStatusPages iterates over the pages of a DescribeInstanceStatus operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeInstanceStatus method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeInstanceStatus operation.
    +//    pageNum := 0
    +//    err := client.DescribeInstanceStatusPages(params,
    +//        func(page *DescribeInstanceStatusOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
    +func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(p *DescribeInstanceStatusOutput, lastPage bool) (shouldContinue bool)) error {
    +	page, _ := c.DescribeInstanceStatusRequest(input)
    +	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    +	return page.EachPage(func(p interface{}, lastPage bool) bool {
    +		return fn(p.(*DescribeInstanceStatusOutput), lastPage)
    +	})
    +}
    +
    +const opDescribeInstances = "DescribeInstances"
    +
    +// DescribeInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeInstancesRequest method.
    +//    req, resp := client.DescribeInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +		Paginator: &request.Paginator{
    +			InputTokens:     []string{"NextToken"},
    +			OutputTokens:    []string{"NextToken"},
    +			LimitToken:      "MaxResults",
    +			TruncationToken: "",
    +		},
    +	}
    +
    +	if input == nil {
    +		input = &DescribeInstancesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your instances.
    +//
    +// If you specify one or more instance IDs, Amazon EC2 returns information for
    +// those instances. If you do not specify instance IDs, Amazon EC2 returns information
    +// for all relevant instances. If you specify an instance ID that is not valid,
    +// an error is returned. If you specify an instance that you do not own, it
    +// is not included in the returned results.
    +//
    +// Recently terminated instances might appear in the returned results. This
    +// interval is usually less than one hour.
    +//
    +// If you describe instances in the rare case where an Availability Zone is
    +// experiencing a service disruption and you specify instance IDs that are in
    +// the affected zone, or do not specify any instance IDs at all, the call fails.
    +// If you describe instances and specify only instance IDs that are in an unaffected
    +// zone, the call works normally.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeInstances for usage and error information.
    +func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) {
    +	req, out := c.DescribeInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// DescribeInstancesPages iterates over the pages of a DescribeInstances operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeInstances method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeInstances operation.
    +//    pageNum := 0
    +//    err := client.DescribeInstancesPages(params,
    +//        func(page *DescribeInstancesOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
    +func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *DescribeInstancesOutput, lastPage bool) (shouldContinue bool)) error {
    +	page, _ := c.DescribeInstancesRequest(input)
    +	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    +	return page.EachPage(func(p interface{}, lastPage bool) bool {
    +		return fn(p.(*DescribeInstancesOutput), lastPage)
    +	})
    +}
    +
    +const opDescribeInternetGateways = "DescribeInternetGateways"
    +
    +// DescribeInternetGatewaysRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeInternetGateways operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeInternetGateways for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeInternetGateways method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeInternetGatewaysRequest method.
    +//    req, resp := client.DescribeInternetGatewaysRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInput) (req *request.Request, output *DescribeInternetGatewaysOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeInternetGateways,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeInternetGatewaysInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeInternetGatewaysOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeInternetGateways API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your Internet gateways.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeInternetGateways for usage and error information.
    +func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*DescribeInternetGatewaysOutput, error) {
    +	req, out := c.DescribeInternetGatewaysRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeKeyPairs = "DescribeKeyPairs"
    +
    +// DescribeKeyPairsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeKeyPairs operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeKeyPairs for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeKeyPairs method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeKeyPairsRequest method.
    +//    req, resp := client.DescribeKeyPairsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *request.Request, output *DescribeKeyPairsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeKeyPairs,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeKeyPairsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeKeyPairsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeKeyPairs API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your key pairs.
    +//
    +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeKeyPairs for usage and error information.
    +func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsOutput, error) {
    +	req, out := c.DescribeKeyPairsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeMovingAddresses = "DescribeMovingAddresses"
    +
    +// DescribeMovingAddressesRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeMovingAddresses operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeMovingAddresses for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeMovingAddresses method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeMovingAddressesRequest method.
    +//    req, resp := client.DescribeMovingAddressesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput) (req *request.Request, output *DescribeMovingAddressesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeMovingAddresses,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeMovingAddressesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeMovingAddressesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeMovingAddresses API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes your Elastic IP addresses that are being moved to the EC2-VPC platform,
    +// or that are being restored to the EC2-Classic platform. This request does
    +// not return information about any other Elastic IP addresses in your account.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeMovingAddresses for usage and error information.
    +func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*DescribeMovingAddressesOutput, error) {
    +	req, out := c.DescribeMovingAddressesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeNatGateways = "DescribeNatGateways"
    +
    +// DescribeNatGatewaysRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeNatGateways operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeNatGateways for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeNatGateways method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeNatGatewaysRequest method.
    +//    req, resp := client.DescribeNatGatewaysRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req *request.Request, output *DescribeNatGatewaysOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeNatGateways,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeNatGatewaysInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeNatGatewaysOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeNatGateways API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your NAT gateways.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeNatGateways for usage and error information.
    +func (c *EC2) DescribeNatGateways(input *DescribeNatGatewaysInput) (*DescribeNatGatewaysOutput, error) {
    +	req, out := c.DescribeNatGatewaysRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeNetworkAcls = "DescribeNetworkAcls"
    +
    +// DescribeNetworkAclsRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeNetworkAcls operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeNetworkAcls for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeNetworkAcls method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeNetworkAclsRequest method.
    +//    req, resp := client.DescribeNetworkAclsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req *request.Request, output *DescribeNetworkAclsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeNetworkAcls,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeNetworkAclsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeNetworkAclsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeNetworkAcls API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your network ACLs.
    +//
    +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeNetworkAcls for usage and error information.
    +func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNetworkAclsOutput, error) {
    +	req, out := c.DescribeNetworkAclsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute"
    +
    +// DescribeNetworkInterfaceAttributeRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeNetworkInterfaceAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeNetworkInterfaceAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeNetworkInterfaceAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeNetworkInterfaceAttributeRequest method.
    +//    req, resp := client.DescribeNetworkInterfaceAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInterfaceAttributeInput) (req *request.Request, output *DescribeNetworkInterfaceAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeNetworkInterfaceAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeNetworkInterfaceAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeNetworkInterfaceAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeNetworkInterfaceAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes a network interface attribute. You can specify only one attribute
    +// at a time.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeNetworkInterfaceAttribute for usage and error information.
    +func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceAttributeInput) (*DescribeNetworkInterfaceAttributeOutput, error) {
    +	req, out := c.DescribeNetworkInterfaceAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces"
    +
    +// DescribeNetworkInterfacesRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeNetworkInterfaces operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeNetworkInterfaces for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeNetworkInterfaces method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeNetworkInterfacesRequest method.
    +//    req, resp := client.DescribeNetworkInterfacesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesInput) (req *request.Request, output *DescribeNetworkInterfacesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeNetworkInterfaces,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeNetworkInterfacesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeNetworkInterfacesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeNetworkInterfaces API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your network interfaces.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeNetworkInterfaces for usage and error information.
    +func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) (*DescribeNetworkInterfacesOutput, error) {
    +	req, out := c.DescribeNetworkInterfacesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribePlacementGroups = "DescribePlacementGroups"
    +
    +// DescribePlacementGroupsRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribePlacementGroups operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribePlacementGroups for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribePlacementGroups method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribePlacementGroupsRequest method.
    +//    req, resp := client.DescribePlacementGroupsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput) (req *request.Request, output *DescribePlacementGroupsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribePlacementGroups,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribePlacementGroupsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribePlacementGroupsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribePlacementGroups API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your placement groups. For more information about
    +// placement groups and cluster instances, see Cluster Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribePlacementGroups for usage and error information.
    +func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*DescribePlacementGroupsOutput, error) {
    +	req, out := c.DescribePlacementGroupsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribePrefixLists = "DescribePrefixLists"
    +
    +// DescribePrefixListsRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribePrefixLists operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribePrefixLists for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribePrefixLists method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribePrefixListsRequest method.
    +//    req, resp := client.DescribePrefixListsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *request.Request, output *DescribePrefixListsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribePrefixLists,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribePrefixListsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribePrefixListsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribePrefixLists API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes available AWS services in a prefix list format, which includes
    +// the prefix list name and prefix list ID of the service and the IP address
    +// range for the service. A prefix list ID is required for creating an outbound
    +// security group rule that allows traffic from a VPC to access an AWS service
    +// through a VPC endpoint.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribePrefixLists for usage and error information.
    +func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePrefixListsOutput, error) {
    +	req, out := c.DescribePrefixListsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeRegions = "DescribeRegions"
    +
    +// DescribeRegionsRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeRegions operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeRegions for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeRegions method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeRegionsRequest method.
    +//    req, resp := client.DescribeRegionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request.Request, output *DescribeRegionsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeRegions,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeRegionsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeRegionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeRegions API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more regions that are currently available to you.
    +//
    +// For a list of the regions supported by Amazon EC2, see Regions and Endpoints
    +// (http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeRegions for usage and error information.
    +func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutput, error) {
    +	req, out := c.DescribeRegionsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeReservedInstances = "DescribeReservedInstances"
    +
    +// DescribeReservedInstancesRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeReservedInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeReservedInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeReservedInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeReservedInstancesRequest method.
    +//    req, resp := client.DescribeReservedInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesInput) (req *request.Request, output *DescribeReservedInstancesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeReservedInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeReservedInstancesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeReservedInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeReservedInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of the Reserved Instances that you purchased.
    +//
    +// For more information about Reserved Instances, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeReservedInstances for usage and error information.
    +func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) {
    +	req, out := c.DescribeReservedInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings"
    +
    +// DescribeReservedInstancesListingsRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeReservedInstancesListings operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeReservedInstancesListings for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeReservedInstancesListings method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeReservedInstancesListingsRequest method.
    +//    req, resp := client.DescribeReservedInstancesListingsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedInstancesListingsInput) (req *request.Request, output *DescribeReservedInstancesListingsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeReservedInstancesListings,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeReservedInstancesListingsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeReservedInstancesListingsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeReservedInstancesListings API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes your account's Reserved Instance listings in the Reserved Instance
    +// Marketplace.
    +//
    +// The Reserved Instance Marketplace matches sellers who want to resell Reserved
    +// Instance capacity that they no longer need with buyers who want to purchase
    +// additional capacity. Reserved Instances bought and sold through the Reserved
    +// Instance Marketplace work like any other Reserved Instances.
    +//
    +// As a seller, you choose to list some or all of your Reserved Instances, and
    +// you specify the upfront price to receive for them. Your Reserved Instances
    +// are then listed in the Reserved Instance Marketplace and are available for
    +// purchase.
    +//
    +// As a buyer, you specify the configuration of the Reserved Instance to purchase,
    +// and the Marketplace matches what you're searching for with what's available.
    +// The Marketplace first sells the lowest priced Reserved Instances to you,
    +// and continues to sell available Reserved Instance listings to you until your
    +// demand is met. You are charged based on the total price of all of the listings
    +// that you purchase.
    +//
    +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeReservedInstancesListings for usage and error information.
    +func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstancesListingsInput) (*DescribeReservedInstancesListingsOutput, error) {
    +	req, out := c.DescribeReservedInstancesListingsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications"
    +
    +// DescribeReservedInstancesModificationsRequest generates an "aws/request.Request" representing the
    +// client's request for the DescribeReservedInstancesModifications operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeReservedInstancesModifications for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeReservedInstancesModifications method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeReservedInstancesModificationsRequest method.
    +//    req, resp := client.DescribeReservedInstancesModificationsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReservedInstancesModificationsInput) (req *request.Request, output *DescribeReservedInstancesModificationsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeReservedInstancesModifications,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +		Paginator: &request.Paginator{
    +			InputTokens:     []string{"NextToken"},
    +			OutputTokens:    []string{"NextToken"},
    +			LimitToken:      "",
    +			TruncationToken: "",
    +		},
    +	}
    +
    +	if input == nil {
    +		input = &DescribeReservedInstancesModificationsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeReservedInstancesModificationsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeReservedInstancesModifications API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the modifications made to your Reserved Instances. If no parameter
    +// is specified, information about all your Reserved Instances modification
    +// requests is returned. If a modification ID is specified, only information
    +// about the specific modification is returned.
    +//
    +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeReservedInstancesModifications for usage and error information.
    +func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInstancesModificationsInput) (*DescribeReservedInstancesModificationsOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeReservedInstancesModificationsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// DescribeReservedInstancesModificationsPages iterates over the pages of a DescribeReservedInstancesModifications operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeReservedInstancesModifications method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeReservedInstancesModifications operation.
    +//    pageNum := 0
    +//    err := client.DescribeReservedInstancesModificationsPages(params,
    +//        func(page *DescribeReservedInstancesModificationsOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
    +func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(p *DescribeReservedInstancesModificationsOutput, lastPage bool) (shouldContinue bool)) error {
    +	// The second return value (the pre-wired output struct) is unused here;
    +	// each page's data arrives through the EachPage callback instead.
    +	page, _ := c.DescribeReservedInstancesModificationsRequest(input)
    +	// Tag the User-Agent so paginated calls are distinguishable server-side.
    +	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    +	return page.EachPage(func(p interface{}, lastPage bool) bool {
    +		return fn(p.(*DescribeReservedInstancesModificationsOutput), lastPage)
    +	})
    +}
    +
    +const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings"
    +
    +// DescribeReservedInstancesOfferingsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeReservedInstancesOfferings operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeReservedInstancesOfferings for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeReservedInstancesOfferings method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeReservedInstancesOfferingsRequest method.
    +//    req, resp := client.DescribeReservedInstancesOfferingsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedInstancesOfferingsInput) (req *request.Request, output *DescribeReservedInstancesOfferingsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeReservedInstancesOfferings,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +		// Pagination via NextToken; MaxResults bounds the page size.
    +		Paginator: &request.Paginator{
    +			InputTokens:     []string{"NextToken"},
    +			OutputTokens:    []string{"NextToken"},
    +			LimitToken:      "MaxResults",
    +			TruncationToken: "",
    +		},
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeReservedInstancesOfferingsInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeReservedInstancesOfferingsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeReservedInstancesOfferings API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes Reserved Instance offerings that are available for purchase. With
    +// Reserved Instances, you purchase the right to launch instances for a period
    +// of time. During that time period, you do not receive insufficient capacity
    +// errors, and you pay a lower usage rate than the rate charged for On-Demand
    +// instances for the actual time used.
    +//
    +// If you have listed your own Reserved Instances for sale in the Reserved Instance
    +// Marketplace, they will be excluded from these results. This is to ensure
    +// that you do not purchase your own Reserved Instances.
    +//
    +// For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeReservedInstancesOfferings for usage and error information.
    +func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstancesOfferingsInput) (*DescribeReservedInstancesOfferingsOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeReservedInstancesOfferingsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// DescribeReservedInstancesOfferingsPages iterates over the pages of a DescribeReservedInstancesOfferings operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeReservedInstancesOfferings method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeReservedInstancesOfferings operation.
    +//    pageNum := 0
    +//    err := client.DescribeReservedInstancesOfferingsPages(params,
    +//        func(page *DescribeReservedInstancesOfferingsOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
    +func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(p *DescribeReservedInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error {
    +	// The second return value (the pre-wired output struct) is unused here;
    +	// each page's data arrives through the EachPage callback instead.
    +	page, _ := c.DescribeReservedInstancesOfferingsRequest(input)
    +	// Tag the User-Agent so paginated calls are distinguishable server-side.
    +	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    +	return page.EachPage(func(p interface{}, lastPage bool) bool {
    +		return fn(p.(*DescribeReservedInstancesOfferingsOutput), lastPage)
    +	})
    +}
    +
    +const opDescribeRouteTables = "DescribeRouteTables"
    +
    +// DescribeRouteTablesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeRouteTables operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeRouteTables for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeRouteTables method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeRouteTablesRequest method.
    +//    req, resp := client.DescribeRouteTablesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *request.Request, output *DescribeRouteTablesOutput) {
    +	// No Paginator is configured: this operation is not paginated here.
    +	op := &request.Operation{
    +		Name:       opDescribeRouteTables,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeRouteTablesInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeRouteTablesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeRouteTables API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your route tables.
    +//
    +// Each subnet in your VPC must be associated with a route table. If a subnet
    +// is not explicitly associated with any route table, it is implicitly associated
    +// with the main route table. This command does not return the subnet ID for
    +// implicit associations.
    +//
    +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeRouteTables for usage and error information.
    +func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRouteTablesOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeRouteTablesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability"
    +
    +// DescribeScheduledInstanceAvailabilityRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeScheduledInstanceAvailability operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeScheduledInstanceAvailability for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeScheduledInstanceAvailability method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeScheduledInstanceAvailabilityRequest method.
    +//    req, resp := client.DescribeScheduledInstanceAvailabilityRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeScheduledInstanceAvailabilityInput) (req *request.Request, output *DescribeScheduledInstanceAvailabilityOutput) {
    +	// No Paginator is configured: this operation is not paginated here.
    +	op := &request.Operation{
    +		Name:       opDescribeScheduledInstanceAvailability,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeScheduledInstanceAvailabilityInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeScheduledInstanceAvailabilityOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeScheduledInstanceAvailability API operation for Amazon Elastic Compute Cloud.
    +//
    +// Finds available schedules that meet the specified criteria.
    +//
    +// You can search for an available schedule no more than 3 months in advance.
    +// You must meet the minimum required duration of 1,200 hours per year. For
    +// example, the minimum daily schedule is 4 hours, the minimum weekly schedule
    +// is 24 hours, and the minimum monthly schedule is 100 hours.
    +//
    +// After you find a schedule that meets your needs, call PurchaseScheduledInstances
    +// to purchase Scheduled Instances with that schedule.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeScheduledInstanceAvailability for usage and error information.
    +func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInstanceAvailabilityInput) (*DescribeScheduledInstanceAvailabilityOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeScheduledInstanceAvailabilityRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeScheduledInstances = "DescribeScheduledInstances"
    +
    +// DescribeScheduledInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeScheduledInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeScheduledInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeScheduledInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeScheduledInstancesRequest method.
    +//    req, resp := client.DescribeScheduledInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstancesInput) (req *request.Request, output *DescribeScheduledInstancesOutput) {
    +	// No Paginator is configured: this operation is not paginated here.
    +	op := &request.Operation{
    +		Name:       opDescribeScheduledInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeScheduledInstancesInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeScheduledInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeScheduledInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your Scheduled Instances.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeScheduledInstances for usage and error information.
    +func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) (*DescribeScheduledInstancesOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeScheduledInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeSecurityGroupReferences = "DescribeSecurityGroupReferences"
    +
    +// DescribeSecurityGroupReferencesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSecurityGroupReferences operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSecurityGroupReferences for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSecurityGroupReferences method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSecurityGroupReferencesRequest method.
    +//    req, resp := client.DescribeSecurityGroupReferencesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeSecurityGroupReferencesRequest(input *DescribeSecurityGroupReferencesInput) (req *request.Request, output *DescribeSecurityGroupReferencesOutput) {
    +	// No Paginator is configured: this operation is not paginated here.
    +	op := &request.Operation{
    +		Name:       opDescribeSecurityGroupReferences,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeSecurityGroupReferencesInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeSecurityGroupReferencesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeSecurityGroupReferences API operation for Amazon Elastic Compute Cloud.
    +//
    +// [EC2-VPC only] Describes the VPCs on the other side of a VPC peering connection
    +// that are referencing the security groups you've specified in this request.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSecurityGroupReferences for usage and error information.
    +func (c *EC2) DescribeSecurityGroupReferences(input *DescribeSecurityGroupReferencesInput) (*DescribeSecurityGroupReferencesOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeSecurityGroupReferencesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeSecurityGroups = "DescribeSecurityGroups"
    +
    +// DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSecurityGroups operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSecurityGroups for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSecurityGroups method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSecurityGroupsRequest method.
    +//    req, resp := client.DescribeSecurityGroupsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) (req *request.Request, output *DescribeSecurityGroupsOutput) {
    +	// No Paginator is configured: this operation is not paginated here.
    +	op := &request.Operation{
    +		Name:       opDescribeSecurityGroups,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeSecurityGroupsInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeSecurityGroupsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeSecurityGroups API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your security groups.
    +//
    +// A security group is for use with instances either in the EC2-Classic platform
    +// or in a specific VPC. For more information, see Amazon EC2 Security Groups
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
    +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your
    +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSecurityGroups for usage and error information.
    +func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*DescribeSecurityGroupsOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeSecurityGroupsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute"
    +
    +// DescribeSnapshotAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSnapshotAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSnapshotAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSnapshotAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSnapshotAttributeRequest method.
    +//    req, resp := client.DescribeSnapshotAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeInput) (req *request.Request, output *DescribeSnapshotAttributeOutput) {
    +	// No Paginator is configured: this operation is not paginated here.
    +	op := &request.Operation{
    +		Name:       opDescribeSnapshotAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeSnapshotAttributeInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeSnapshotAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeSnapshotAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the specified attribute of the specified snapshot. You can specify
    +// only one attribute at a time.
    +//
    +// For more information about EBS snapshots, see Amazon EBS Snapshots (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSnapshotAttribute for usage and error information.
    +func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeSnapshotAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeSnapshots = "DescribeSnapshots"
    +
    +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSnapshots operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSnapshots for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSnapshots method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSnapshotsRequest method.
    +//    req, resp := client.DescribeSnapshotsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeSnapshots,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +		// Pagination via NextToken; MaxResults bounds the page size.
    +		Paginator: &request.Paginator{
    +			InputTokens:     []string{"NextToken"},
    +			OutputTokens:    []string{"NextToken"},
    +			LimitToken:      "MaxResults",
    +			TruncationToken: "",
    +		},
    +	}
    +
    +	// Substitute an empty input so a nil argument still yields a valid request.
    +	if input == nil {
    +		input = &DescribeSnapshotsInput{}
    +	}
    +
    +	// NOTE(review): output is still nil when handed to newRequest; the real
    +	// output struct is allocated afterwards and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeSnapshotsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeSnapshots API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of the EBS snapshots available to you. Available snapshots
    +// include public snapshots available for any AWS account to launch, private
    +// snapshots that you own, and private snapshots owned by another AWS account
    +// but for which you've been given explicit create volume permissions.
    +//
    +// The create volume permissions fall into the following categories:
    +//
    +//    * public: The owner of the snapshot granted create volume permissions
    +//    for the snapshot to the all group. All AWS accounts have create volume
    +//    permissions for these snapshots.
    +//
    +//    * explicit: The owner of the snapshot granted create volume permissions
    +//    to a specific AWS account.
    +//
    +//    * implicit: An AWS account has implicit create volume permissions for
    +//    all snapshots it owns.
    +//
    +// The list of snapshots returned can be modified by specifying snapshot IDs,
    +// snapshot owners, or AWS accounts with create volume permissions. If no options
    +// are specified, Amazon EC2 returns all snapshots for which you have create
    +// volume permissions.
    +//
    +// If you specify one or more snapshot IDs, only snapshots that have the specified
    +// IDs are returned. If you specify an invalid snapshot ID, an error is returned.
    +// If you specify a snapshot ID for which you do not have access, it is not
    +// included in the returned results.
    +//
    +// If you specify one or more snapshot owners using the OwnerIds option, only
    +// snapshots from the specified owners and for which you have access are returned.
    +// The results can include the AWS account IDs of the specified owners, amazon
    +// for snapshots owned by Amazon, or self for snapshots that you own.
    +//
    +// If you specify a list of restorable users, only snapshots with create snapshot
    +// permissions for those users are returned. You can specify AWS account IDs
    +// (if you own the snapshots), self for snapshots for which you own or have
    +// explicit permissions, or all for public snapshots.
    +//
    +// If you are describing a long list of snapshots, you can paginate the output
    +// to make the list more manageable. The MaxResults parameter sets the maximum
    +// number of results returned in a single page. If the list of results exceeds
    +// your MaxResults value, then that number of results is returned along with
    +// a NextToken value that can be passed to a subsequent DescribeSnapshots request
    +// to retrieve the remaining results.
    +//
    +// For more information about EBS snapshots, see Amazon EBS Snapshots (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSnapshots for usage and error information.
    +func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) {
    +	// Build the request, then execute it synchronously; out is the struct
    +	// the Request constructor wired to req.Data, filled in by Send.
    +	req, out := c.DescribeSnapshotsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeSnapshots method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeSnapshots operation.
    +//    pageNum := 0
    +//    err := client.DescribeSnapshotsPages(params,
    +//        func(page *DescribeSnapshotsOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
    +func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error {
    +	// The second return value (the pre-wired output struct) is unused here;
    +	// each page's data arrives through the EachPage callback instead.
    +	page, _ := c.DescribeSnapshotsRequest(input)
    +	// Tag the User-Agent so paginated calls are distinguishable server-side.
    +	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    +	return page.EachPage(func(p interface{}, lastPage bool) bool {
    +		return fn(p.(*DescribeSnapshotsOutput), lastPage)
    +	})
    +}
    +
    +const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription"
    +
    +// DescribeSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSpotDatafeedSubscription operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSpotDatafeedSubscription for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSpotDatafeedSubscription method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSpotDatafeedSubscriptionRequest method.
    +//    req, resp := client.DescribeSpotDatafeedSubscriptionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) (req *request.Request, output *DescribeSpotDatafeedSubscriptionOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSpotDatafeedSubscription,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeSpotDatafeedSubscriptionInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeSpotDatafeedSubscriptionOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the data feed for Spot instances. For more information, see Spot
    +// Instance Data Feed (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSpotDatafeedSubscription for usage and error information.
+func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscriptionInput) (*DescribeSpotDatafeedSubscriptionOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeSpotDatafeedSubscriptionRequest wires to req.Data.
+	req, out := c.DescribeSpotDatafeedSubscriptionRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
+const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" // wire name used as request.Operation.Name below
    +
    +// DescribeSpotFleetInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSpotFleetInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSpotFleetInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSpotFleetInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSpotFleetInstancesRequest method.
    +//    req, resp := client.DescribeSpotFleetInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstancesInput) (req *request.Request, output *DescribeSpotFleetInstancesOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSpotFleetInstances,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeSpotFleetInstancesInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeSpotFleetInstancesOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeSpotFleetInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the running instances for the specified Spot fleet.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSpotFleetInstances for usage and error information.
+func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) (*DescribeSpotFleetInstancesOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeSpotFleetInstancesRequest wires to req.Data.
+	req, out := c.DescribeSpotFleetInstancesRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
+const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" // wire name used as request.Operation.Name below
    +
    +// DescribeSpotFleetRequestHistoryRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSpotFleetRequestHistory operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSpotFleetRequestHistory for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSpotFleetRequestHistory method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSpotFleetRequestHistoryRequest method.
    +//    req, resp := client.DescribeSpotFleetRequestHistoryRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetRequestHistoryInput) (req *request.Request, output *DescribeSpotFleetRequestHistoryOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSpotFleetRequestHistory,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeSpotFleetRequestHistoryInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeSpotFleetRequestHistoryOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeSpotFleetRequestHistory API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the events for the specified Spot fleet request during the specified
    +// time.
    +//
    +// Spot fleet events are delayed by up to 30 seconds before they can be described.
    +// This ensures that you can query by the last evaluated time and not miss a
    +// recorded event.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSpotFleetRequestHistory for usage and error information.
+func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHistoryInput) (*DescribeSpotFleetRequestHistoryOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeSpotFleetRequestHistoryRequest wires to req.Data.
+	req, out := c.DescribeSpotFleetRequestHistoryRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
+const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" // wire name used as request.Operation.Name below
    +
    +// DescribeSpotFleetRequestsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSpotFleetRequests operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSpotFleetRequests for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSpotFleetRequests method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSpotFleetRequestsRequest method.
    +//    req, resp := client.DescribeSpotFleetRequestsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsInput) (req *request.Request, output *DescribeSpotFleetRequestsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSpotFleetRequests,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		// Pagination metadata: pages are linked via NextToken and page
+		// size is capped by MaxResults.
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeSpotFleetRequestsInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeSpotFleetRequestsOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeSpotFleetRequests API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes your Spot fleet requests.
    +//
    +// Spot fleet requests are deleted 48 hours after they are canceled and their
    +// instances are terminated.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSpotFleetRequests for usage and error information.
+func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) (*DescribeSpotFleetRequestsOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeSpotFleetRequestsRequest wires to req.Data.
+	req, out := c.DescribeSpotFleetRequestsRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
    +// DescribeSpotFleetRequestsPages iterates over the pages of a DescribeSpotFleetRequests operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeSpotFleetRequests method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeSpotFleetRequests operation.
    +//    pageNum := 0
    +//    err := client.DescribeSpotFleetRequestsPages(params,
    +//        func(page *DescribeSpotFleetRequestsOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
+func (c *EC2) DescribeSpotFleetRequestsPages(input *DescribeSpotFleetRequestsInput, fn func(p *DescribeSpotFleetRequestsOutput, lastPage bool) (shouldContinue bool)) error {
+	// NOTE(review): the request-construction error is discarded here
+	// (generated code).
+	page, _ := c.DescribeSpotFleetRequestsRequest(input)
+	// Append "Paginator" to the User-Agent so paginated calls are identifiable.
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	// EachPage invokes fn once per page; fn returning false stops iteration.
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeSpotFleetRequestsOutput), lastPage)
+	})
+}
    +
+const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" // wire name used as request.Operation.Name below
    +
    +// DescribeSpotInstanceRequestsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSpotInstanceRequests operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSpotInstanceRequests for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSpotInstanceRequests method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSpotInstanceRequestsRequest method.
    +//    req, resp := client.DescribeSpotInstanceRequestsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceRequestsInput) (req *request.Request, output *DescribeSpotInstanceRequestsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSpotInstanceRequests,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeSpotInstanceRequestsInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeSpotInstanceRequestsOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeSpotInstanceRequests API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the Spot instance requests that belong to your account. Spot instances
    +// are instances that Amazon EC2 launches when the bid price that you specify
    +// exceeds the current Spot price. Amazon EC2 periodically sets the Spot price
    +// based on available Spot instance capacity and current Spot instance requests.
    +// For more information, see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// You can use DescribeSpotInstanceRequests to find a running Spot instance
    +// by examining the response. If the status of the Spot instance is fulfilled,
    +// the instance ID appears in the response and contains the identifier of the
    +// instance. Alternatively, you can use DescribeInstances with a filter to look
    +// for instances where the instance lifecycle is spot.
    +//
    +// Spot instance requests are deleted 4 hours after they are canceled and their
    +// instances are terminated.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSpotInstanceRequests for usage and error information.
+func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsInput) (*DescribeSpotInstanceRequestsOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeSpotInstanceRequestsRequest wires to req.Data.
+	req, out := c.DescribeSpotInstanceRequestsRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
+const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" // wire name used as request.Operation.Name below
    +
    +// DescribeSpotPriceHistoryRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSpotPriceHistory operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSpotPriceHistory for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSpotPriceHistory method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSpotPriceHistoryRequest method.
    +//    req, resp := client.DescribeSpotPriceHistoryRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInput) (req *request.Request, output *DescribeSpotPriceHistoryOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSpotPriceHistory,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		// Pagination metadata: pages are linked via NextToken and page
+		// size is capped by MaxResults.
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeSpotPriceHistoryInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeSpotPriceHistoryOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeSpotPriceHistory API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the Spot price history. The prices returned are listed in chronological
    +// order, from the oldest to the most recent, for up to the past 90 days. For
    +// more information, see Spot Instance Pricing History (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// When you specify a start and end time, this operation returns the prices
    +// of the instance types within the time range that you specified and the time
    +// when the price changed. The price is valid within the time period that you
    +// specified; the response merely indicates the last time that the price changed.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSpotPriceHistory for usage and error information.
+func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*DescribeSpotPriceHistoryOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeSpotPriceHistoryRequest wires to req.Data.
+	req, out := c.DescribeSpotPriceHistoryRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
    +// DescribeSpotPriceHistoryPages iterates over the pages of a DescribeSpotPriceHistory operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeSpotPriceHistory method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeSpotPriceHistory operation.
    +//    pageNum := 0
    +//    err := client.DescribeSpotPriceHistoryPages(params,
    +//        func(page *DescribeSpotPriceHistoryOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
+func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(p *DescribeSpotPriceHistoryOutput, lastPage bool) (shouldContinue bool)) error {
+	// NOTE(review): the request-construction error is discarded here
+	// (generated code).
+	page, _ := c.DescribeSpotPriceHistoryRequest(input)
+	// Append "Paginator" to the User-Agent so paginated calls are identifiable.
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	// EachPage invokes fn once per page; fn returning false stops iteration.
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeSpotPriceHistoryOutput), lastPage)
+	})
+}
    +
+const opDescribeStaleSecurityGroups = "DescribeStaleSecurityGroups" // wire name used as request.Operation.Name below
    +
    +// DescribeStaleSecurityGroupsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeStaleSecurityGroups operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeStaleSecurityGroups for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeStaleSecurityGroups method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeStaleSecurityGroupsRequest method.
    +//    req, resp := client.DescribeStaleSecurityGroupsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeStaleSecurityGroupsRequest(input *DescribeStaleSecurityGroupsInput) (req *request.Request, output *DescribeStaleSecurityGroupsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeStaleSecurityGroups,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeStaleSecurityGroupsInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeStaleSecurityGroupsOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeStaleSecurityGroups API operation for Amazon Elastic Compute Cloud.
    +//
    +// [EC2-VPC only] Describes the stale security group rules for security groups
    +// in a specified VPC. Rules are stale when they reference a deleted security
    +// group in a peer VPC, or a security group in a peer VPC for which the VPC
    +// peering connection has been deleted.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeStaleSecurityGroups for usage and error information.
+func (c *EC2) DescribeStaleSecurityGroups(input *DescribeStaleSecurityGroupsInput) (*DescribeStaleSecurityGroupsOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeStaleSecurityGroupsRequest wires to req.Data.
+	req, out := c.DescribeStaleSecurityGroupsRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
+const opDescribeSubnets = "DescribeSubnets" // wire name used as request.Operation.Name below
    +
    +// DescribeSubnetsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeSubnets operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeSubnets for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeSubnets method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeSubnetsRequest method.
    +//    req, resp := client.DescribeSubnetsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.Request, output *DescribeSubnetsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeSubnets,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeSubnetsInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeSubnetsOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeSubnets API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your subnets.
    +//
    +// For more information about subnets, see Your VPC and Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeSubnets for usage and error information.
+func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeSubnetsRequest wires to req.Data.
+	req, out := c.DescribeSubnetsRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
+const opDescribeTags = "DescribeTags" // wire name used as request.Operation.Name below
    +
    +// DescribeTagsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeTags operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeTags for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeTags method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeTagsRequest method.
    +//    req, resp := client.DescribeTagsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) {
+	op := &request.Operation{
+		Name:       opDescribeTags,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		// Pagination metadata: pages are linked via NextToken and page
+		// size is capped by MaxResults.
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeTagsInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeTagsOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeTags API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of the tags for your EC2 resources.
    +//
    +// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeTags for usage and error information.
+func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) {
+	// Build the request and execute it synchronously; out is the response
+	// object that DescribeTagsRequest wires to req.Data.
+	req, out := c.DescribeTagsRequest(input)
+	err := req.Send()
+	return out, err
+}
    +
    +// DescribeTagsPages iterates over the pages of a DescribeTags operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeTags method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeTags operation.
    +//    pageNum := 0
    +//    err := client.DescribeTagsPages(params,
    +//        func(page *DescribeTagsOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
+func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error {
+	// NOTE(review): the request-construction error is discarded here
+	// (generated code).
+	page, _ := c.DescribeTagsRequest(input)
+	// Append "Paginator" to the User-Agent so paginated calls are identifiable.
+	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+	// EachPage invokes fn once per page; fn returning false stops iteration.
+	return page.EachPage(func(p interface{}, lastPage bool) bool {
+		return fn(p.(*DescribeTagsOutput), lastPage)
+	})
+}
    +
+const opDescribeVolumeAttribute = "DescribeVolumeAttribute" // wire name used as request.Operation.Name below
    +
    +// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVolumeAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVolumeAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVolumeAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVolumeAttributeRequest method.
    +//    req, resp := client.DescribeVolumeAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
+func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) {
+	op := &request.Operation{
+		Name:       opDescribeVolumeAttribute,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	// Substitute an empty struct for a nil input so newRequest always
+	// receives a valid parameter object.
+	if input == nil {
+		input = &DescribeVolumeAttributeInput{}
+	}
+
+	// NOTE(review): output is still nil when passed to newRequest; req.Data
+	// is re-pointed at the freshly allocated output on the next lines.
+	req = c.newRequest(op, input, output)
+	output = &DescribeVolumeAttributeOutput{}
+	req.Data = output
+	return
+}
    +
    +// DescribeVolumeAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the specified attribute of the specified volume. You can specify
    +// only one attribute at a time.
    +//
    +// For more information about EBS volumes, see Amazon EBS Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVolumeAttribute for usage and error information.
    +func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) {
    +	req, out := c.DescribeVolumeAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVolumeStatus = "DescribeVolumeStatus"
    +
    +// DescribeVolumeStatusRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVolumeStatus operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVolumeStatus for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVolumeStatus method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVolumeStatusRequest method.
    +//    req, resp := client.DescribeVolumeStatusRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req *request.Request, output *DescribeVolumeStatusOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVolumeStatus,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +		Paginator: &request.Paginator{
    +			InputTokens:     []string{"NextToken"},
    +			OutputTokens:    []string{"NextToken"},
    +			LimitToken:      "MaxResults",
    +			TruncationToken: "",
    +		},
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVolumeStatusInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVolumeStatusOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVolumeStatus API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the status of the specified volumes. Volume status provides the
    +// result of the checks performed on your volumes to determine events that can
    +// impair the performance of your volumes. The performance of a volume can be
    +// affected if an issue occurs on the volume's underlying host. If the volume's
    +// underlying host experiences a power outage or system issue, after the system
    +// is restored, there could be data inconsistencies on the volume. Volume events
    +// notify you if this occurs. Volume actions notify you if any action needs
    +// to be taken in response to the event.
    +//
    +// The DescribeVolumeStatus operation provides the following information about
    +// the specified volumes:
    +//
    +// Status: Reflects the current status of the volume. The possible values are
    +// ok, impaired , warning, or insufficient-data. If all checks pass, the overall
    +// status of the volume is ok. If the check fails, the overall status is impaired.
    +// If the status is insufficient-data, then the checks may still be taking place
    +// on your volume at the time. We recommend that you retry the request. For
    +// more information on volume status, see Monitoring the Status of Your Volumes
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html).
    +//
    +// Events: Reflect the cause of a volume status and may require you to take
    +// action. For example, if your volume returns an impaired status, then the
    +// volume event might be potential-data-inconsistency. This means that your
    +// volume has been affected by an issue with the underlying host, has all I/O
    +// operations disabled, and may have inconsistent data.
    +//
    +// Actions: Reflect the actions you may have to take in response to an event.
    +// For example, if the status of the volume is impaired and the volume event
    +// shows potential-data-inconsistency, then the action shows enable-volume-io.
    +// This means that you may want to enable the I/O operations for the volume
    +// by calling the EnableVolumeIO action and then check the volume for data consistency.
    +//
    +// Volume status is based on the volume status checks, and does not reflect
    +// the volume state. Therefore, volume status does not indicate volumes in the
    +// error state (for example, when a volume is incapable of accepting I/O.)
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVolumeStatus for usage and error information.
    +func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeVolumeStatusOutput, error) {
    +	req, out := c.DescribeVolumeStatusRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// DescribeVolumeStatusPages iterates over the pages of a DescribeVolumeStatus operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeVolumeStatus method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeVolumeStatus operation.
    +//    pageNum := 0
    +//    err := client.DescribeVolumeStatusPages(params,
    +//        func(page *DescribeVolumeStatusOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
    +func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(p *DescribeVolumeStatusOutput, lastPage bool) (shouldContinue bool)) error {
    +	page, _ := c.DescribeVolumeStatusRequest(input)
    +	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    +	return page.EachPage(func(p interface{}, lastPage bool) bool {
    +		return fn(p.(*DescribeVolumeStatusOutput), lastPage)
    +	})
    +}
    +
    +const opDescribeVolumes = "DescribeVolumes"
    +
    +// DescribeVolumesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVolumes operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVolumes for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVolumes method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVolumesRequest method.
    +//    req, resp := client.DescribeVolumesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVolumes,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +		Paginator: &request.Paginator{
    +			InputTokens:     []string{"NextToken"},
    +			OutputTokens:    []string{"NextToken"},
    +			LimitToken:      "MaxResults",
    +			TruncationToken: "",
    +		},
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVolumesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVolumesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVolumes API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the specified EBS volumes.
    +//
    +// If you are describing a long list of volumes, you can paginate the output
    +// to make the list more manageable. The MaxResults parameter sets the maximum
    +// number of results returned in a single page. If the list of results exceeds
    +// your MaxResults value, then that number of results is returned along with
    +// a NextToken value that can be passed to a subsequent DescribeVolumes request
    +// to retrieve the remaining results.
    +//
    +// For more information about EBS volumes, see Amazon EBS Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVolumes for usage and error information.
    +func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) {
    +	req, out := c.DescribeVolumesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// DescribeVolumesPages iterates over the pages of a DescribeVolumes operation,
    +// calling the "fn" function with the response data for each page. To stop
    +// iterating, return false from the fn function.
    +//
    +// See DescribeVolumes method for more information on how to use this operation.
    +//
    +// Note: This operation can generate multiple requests to a service.
    +//
    +//    // Example iterating over at most 3 pages of a DescribeVolumes operation.
    +//    pageNum := 0
    +//    err := client.DescribeVolumesPages(params,
    +//        func(page *DescribeVolumesOutput, lastPage bool) bool {
    +//            pageNum++
    +//            fmt.Println(page)
    +//            return pageNum <= 3
    +//        })
    +//
    +func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *DescribeVolumesOutput, lastPage bool) (shouldContinue bool)) error {
    +	page, _ := c.DescribeVolumesRequest(input)
    +	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    +	return page.EachPage(func(p interface{}, lastPage bool) bool {
    +		return fn(p.(*DescribeVolumesOutput), lastPage)
    +	})
    +}
    +
    +const opDescribeVpcAttribute = "DescribeVpcAttribute"
    +
    +// DescribeVpcAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpcAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpcAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpcAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpcAttributeRequest method.
    +//    req, resp := client.DescribeVpcAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req *request.Request, output *DescribeVpcAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpcAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpcAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpcAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpcAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the specified attribute of the specified VPC. You can specify only
    +// one attribute at a time.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpcAttribute for usage and error information.
    +func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeVpcAttributeOutput, error) {
    +	req, out := c.DescribeVpcAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpcClassicLink = "DescribeVpcClassicLink"
    +
    +// DescribeVpcClassicLinkRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpcClassicLink operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpcClassicLink for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpcClassicLink method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpcClassicLinkRequest method.
    +//    req, resp := client.DescribeVpcClassicLinkRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) (req *request.Request, output *DescribeVpcClassicLinkOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpcClassicLink,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpcClassicLinkInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpcClassicLinkOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpcClassicLink API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the ClassicLink status of one or more VPCs.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpcClassicLink for usage and error information.
    +func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*DescribeVpcClassicLinkOutput, error) {
    +	req, out := c.DescribeVpcClassicLinkRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport"
    +
    +// DescribeVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpcClassicLinkDnsSupport operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpcClassicLinkDnsSupport for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpcClassicLinkDnsSupport method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpcClassicLinkDnsSupportRequest method.
    +//    req, resp := client.DescribeVpcClassicLinkDnsSupportRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicLinkDnsSupportInput) (req *request.Request, output *DescribeVpcClassicLinkDnsSupportOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpcClassicLinkDnsSupport,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpcClassicLinkDnsSupportInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpcClassicLinkDnsSupportOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpcClassicLinkDnsSupport API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes the ClassicLink DNS support status of one or more VPCs. If enabled,
    +// the DNS hostname of a linked EC2-Classic instance resolves to its private
    +// IP address when addressed from an instance in the VPC to which it's linked.
    +// Similarly, the DNS hostname of an instance in a VPC resolves to its private
    +// IP address when addressed from a linked EC2-Classic instance. For more information
    +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpcClassicLinkDnsSupport for usage and error information.
    +func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsSupportInput) (*DescribeVpcClassicLinkDnsSupportOutput, error) {
    +	req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices"
    +
    +// DescribeVpcEndpointServicesRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpcEndpointServices operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpcEndpointServices for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpcEndpointServices method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpcEndpointServicesRequest method.
    +//    req, resp := client.DescribeVpcEndpointServicesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServicesInput) (req *request.Request, output *DescribeVpcEndpointServicesOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpcEndpointServices,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpcEndpointServicesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpcEndpointServicesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpcEndpointServices API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes all supported AWS services that can be specified when creating
    +// a VPC endpoint.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpcEndpointServices for usage and error information.
    +func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInput) (*DescribeVpcEndpointServicesOutput, error) {
    +	req, out := c.DescribeVpcEndpointServicesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpcEndpoints = "DescribeVpcEndpoints"
    +
    +// DescribeVpcEndpointsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpcEndpoints operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpcEndpoints for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpcEndpoints method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpcEndpointsRequest method.
    +//    req, resp := client.DescribeVpcEndpointsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req *request.Request, output *DescribeVpcEndpointsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpcEndpoints,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpcEndpointsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpcEndpointsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpcEndpoints API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your VPC endpoints.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpcEndpoints for usage and error information.
    +func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeVpcEndpointsOutput, error) {
    +	req, out := c.DescribeVpcEndpointsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections"
    +
    +// DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpcPeeringConnections operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpcPeeringConnections for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpcPeeringConnections method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpcPeeringConnectionsRequest method.
    +//    req, resp := client.DescribeVpcPeeringConnectionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpcPeeringConnections,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpcPeeringConnectionsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpcPeeringConnectionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpcPeeringConnections API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your VPC peering connections.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpcPeeringConnections for usage and error information.
    +func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) {
    +	req, out := c.DescribeVpcPeeringConnectionsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpcs = "DescribeVpcs"
    +
    +// DescribeVpcsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpcs operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpcs for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpcs method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpcsRequest method.
    +//    req, resp := client.DescribeVpcsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Request, output *DescribeVpcsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpcs,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpcsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpcsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpcs API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your VPCs.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpcs for usage and error information.
    +func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error) {
    +	req, out := c.DescribeVpcsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpnConnections = "DescribeVpnConnections"
    +
    +// DescribeVpnConnectionsRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpnConnections operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpnConnections for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpnConnections method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpnConnectionsRequest method.
    +//    req, resp := client.DescribeVpnConnectionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) (req *request.Request, output *DescribeVpnConnectionsOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpnConnections,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DescribeVpnConnectionsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpnConnectionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpnConnections API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your VPN connections.
    +//
    +// For more information about VPN connections, see Adding a Hardware Virtual
    +// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpnConnections for usage and error information.
    +func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*DescribeVpnConnectionsOutput, error) {
    +	req, out := c.DescribeVpnConnectionsRequest(input)
    +	// out is already attached as req.Data by the Request constructor, so
    +	// Send fills it in on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDescribeVpnGateways = "DescribeVpnGateways"
    +
    +// DescribeVpnGatewaysRequest generates a "aws/request.Request" representing the
    +// client's request for the DescribeVpnGateways operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DescribeVpnGateways for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DescribeVpnGateways method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DescribeVpnGatewaysRequest method.
    +//    req, resp := client.DescribeVpnGatewaysRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req *request.Request, output *DescribeVpnGatewaysOutput) {
    +	op := &request.Operation{
    +		Name:       opDescribeVpnGateways,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DescribeVpnGatewaysInput{}
    +	}
    +
    +	// NOTE(review): output is nil at the newRequest call; the response
    +	// struct is allocated below and attached via req.Data — do not reorder.
    +	req = c.newRequest(op, input, output)
    +	output = &DescribeVpnGatewaysOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DescribeVpnGateways API operation for Amazon Elastic Compute Cloud.
    +//
    +// Describes one or more of your virtual private gateways.
    +//
    +// For more information about virtual private gateways, see Adding an IPsec
    +// Hardware VPN to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DescribeVpnGateways for usage and error information.
    +func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpnGatewaysOutput, error) {
    +	req, out := c.DescribeVpnGatewaysRequest(input)
    +	// out is req.Data; Send populates it on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDetachClassicLinkVpc = "DetachClassicLinkVpc"
    +
    +// DetachClassicLinkVpcRequest generates a "aws/request.Request" representing the
    +// client's request for the DetachClassicLinkVpc operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DetachClassicLinkVpc for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DetachClassicLinkVpc method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DetachClassicLinkVpcRequest method.
    +//    req, resp := client.DetachClassicLinkVpcRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req *request.Request, output *DetachClassicLinkVpcOutput) {
    +	op := &request.Operation{
    +		Name:       opDetachClassicLinkVpc,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DetachClassicLinkVpcInput{}
    +	}
    +
    +	// NOTE(review): output is nil at the newRequest call; the response
    +	// struct is allocated below and attached via req.Data — do not reorder.
    +	req = c.newRequest(op, input, output)
    +	output = &DetachClassicLinkVpcOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DetachClassicLinkVpc API operation for Amazon Elastic Compute Cloud.
    +//
    +// Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance
    +// has been unlinked, the VPC security groups are no longer associated with
    +// it. An instance is automatically unlinked from a VPC when it's stopped.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DetachClassicLinkVpc for usage and error information.
    +func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachClassicLinkVpcOutput, error) {
    +	req, out := c.DetachClassicLinkVpcRequest(input)
    +	// out is req.Data; Send populates it on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDetachInternetGateway = "DetachInternetGateway"
    +
    +// DetachInternetGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the DetachInternetGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DetachInternetGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DetachInternetGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DetachInternetGatewayRequest method.
    +//    req, resp := client.DetachInternetGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (req *request.Request, output *DetachInternetGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opDetachInternetGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DetachInternetGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Replace the default ec2query unmarshaler with the discard-body handler,
    +	// presumably because this operation returns no payload to decode — the
    +	// output struct below stays empty apart from request metadata.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DetachInternetGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DetachInternetGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Detaches an Internet gateway from a VPC, disabling connectivity between the
    +// Internet and the VPC. The VPC must not contain any running instances with
    +// Elastic IP addresses.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DetachInternetGateway for usage and error information.
    +func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachInternetGatewayOutput, error) {
    +	req, out := c.DetachInternetGatewayRequest(input)
    +	// out is req.Data; for this operation the response body is discarded,
    +	// so out carries no decoded payload.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDetachNetworkInterface = "DetachNetworkInterface"
    +
    +// DetachNetworkInterfaceRequest generates a "aws/request.Request" representing the
    +// client's request for the DetachNetworkInterface operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DetachNetworkInterface for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DetachNetworkInterface method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DetachNetworkInterfaceRequest method.
    +//    req, resp := client.DetachNetworkInterfaceRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) (req *request.Request, output *DetachNetworkInterfaceOutput) {
    +	op := &request.Operation{
    +		Name:       opDetachNetworkInterface,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DetachNetworkInterfaceInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the ec2query unmarshaler for the discard-body handler — this
    +	// operation's response is not decoded into the output struct.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DetachNetworkInterfaceOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DetachNetworkInterface API operation for Amazon Elastic Compute Cloud.
    +//
    +// Detaches a network interface from an instance.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DetachNetworkInterface for usage and error information.
    +func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*DetachNetworkInterfaceOutput, error) {
    +	req, out := c.DetachNetworkInterfaceRequest(input)
    +	// out is req.Data; the response body is discarded for this operation.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDetachVolume = "DetachVolume"
    +
    +// DetachVolumeRequest generates a "aws/request.Request" representing the
    +// client's request for the DetachVolume operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DetachVolume for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DetachVolume method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DetachVolumeRequest method.
    +//    req, resp := client.DetachVolumeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *VolumeAttachment) {
    +	op := &request.Operation{
    +		Name:       opDetachVolume,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DetachVolumeInput{}
    +	}
    +
    +	// NOTE(review): unlike most operations here, the output type is the shared
    +	// VolumeAttachment shape rather than a dedicated *Output struct. output is
    +	// nil at the newRequest call and attached via req.Data — do not reorder.
    +	req = c.newRequest(op, input, output)
    +	output = &VolumeAttachment{}
    +	req.Data = output
    +	return
    +}
    +
    +// DetachVolume API operation for Amazon Elastic Compute Cloud.
    +//
    +// Detaches an EBS volume from an instance. Make sure to unmount any file systems
    +// on the device within your operating system before detaching the volume. Failure
    +// to do so can result in the volume becoming stuck in the busy state while
    +// detaching. If this happens, detachment can be delayed indefinitely until
    +// you unmount the volume, force detachment, reboot the instance, or all three.
    +// If an EBS volume is the root device of an instance, it can't be detached
    +// while the instance is running. To detach the root volume, stop the instance
    +// first.
    +//
    +// When a volume with an AWS Marketplace product code is detached from an instance,
    +// the product code is no longer associated with the instance.
    +//
    +// For more information, see Detaching an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DetachVolume for usage and error information.
    +func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) {
    +	req, out := c.DetachVolumeRequest(input)
    +	// out (*VolumeAttachment) is req.Data; Send populates it on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDetachVpnGateway = "DetachVpnGateway"
    +
    +// DetachVpnGatewayRequest generates a "aws/request.Request" representing the
    +// client's request for the DetachVpnGateway operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DetachVpnGateway for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DetachVpnGateway method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DetachVpnGatewayRequest method.
    +//    req, resp := client.DetachVpnGatewayRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *request.Request, output *DetachVpnGatewayOutput) {
    +	op := &request.Operation{
    +		Name:       opDetachVpnGateway,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DetachVpnGatewayInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the ec2query unmarshaler for the discard-body handler — this
    +	// operation's response is not decoded into the output struct.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DetachVpnGatewayOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DetachVpnGateway API operation for Amazon Elastic Compute Cloud.
    +//
    +// Detaches a virtual private gateway from a VPC. You do this if you're planning
    +// to turn off the VPC and not use it anymore. You can confirm a virtual private
    +// gateway has been completely detached from a VPC by describing the virtual
    +// private gateway (any attachments to the virtual private gateway are also
    +// described).
    +//
    +// You must wait for the attachment's state to switch to detached before you
    +// can delete the VPC or attach a different VPC to the virtual private gateway.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DetachVpnGateway for usage and error information.
    +func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayOutput, error) {
    +	req, out := c.DetachVpnGatewayRequest(input)
    +	// out is req.Data; the response body is discarded for this operation.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation"
    +
    +// DisableVgwRoutePropagationRequest generates a "aws/request.Request" representing the
    +// client's request for the DisableVgwRoutePropagation operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DisableVgwRoutePropagation for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DisableVgwRoutePropagation method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DisableVgwRoutePropagationRequest method.
    +//    req, resp := client.DisableVgwRoutePropagationRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagationInput) (req *request.Request, output *DisableVgwRoutePropagationOutput) {
    +	op := &request.Operation{
    +		Name:       opDisableVgwRoutePropagation,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DisableVgwRoutePropagationInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the ec2query unmarshaler for the discard-body handler — this
    +	// operation's response is not decoded into the output struct.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DisableVgwRoutePropagationOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DisableVgwRoutePropagation API operation for Amazon Elastic Compute Cloud.
    +//
    +// Disables a virtual private gateway (VGW) from propagating routes to a specified
    +// route table of a VPC.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DisableVgwRoutePropagation for usage and error information.
    +func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) (*DisableVgwRoutePropagationOutput, error) {
    +	req, out := c.DisableVgwRoutePropagationRequest(input)
    +	// out is req.Data; the response body is discarded for this operation.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDisableVpcClassicLink = "DisableVpcClassicLink"
    +
    +// DisableVpcClassicLinkRequest generates a "aws/request.Request" representing the
    +// client's request for the DisableVpcClassicLink operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DisableVpcClassicLink for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DisableVpcClassicLink method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DisableVpcClassicLinkRequest method.
    +//    req, resp := client.DisableVpcClassicLinkRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (req *request.Request, output *DisableVpcClassicLinkOutput) {
    +	op := &request.Operation{
    +		Name:       opDisableVpcClassicLink,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DisableVpcClassicLinkInput{}
    +	}
    +
    +	// NOTE(review): output is nil at the newRequest call; the response
    +	// struct is allocated below and attached via req.Data — do not reorder.
    +	req = c.newRequest(op, input, output)
    +	output = &DisableVpcClassicLinkOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DisableVpcClassicLink API operation for Amazon Elastic Compute Cloud.
    +//
    +// Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC
    +// that has EC2-Classic instances linked to it.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DisableVpcClassicLink for usage and error information.
    +func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*DisableVpcClassicLinkOutput, error) {
    +	req, out := c.DisableVpcClassicLinkRequest(input)
    +	// out is req.Data; Send populates it on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport"
    +
    +// DisableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the
    +// client's request for the DisableVpcClassicLinkDnsSupport operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DisableVpcClassicLinkDnsSupport for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DisableVpcClassicLinkDnsSupport method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DisableVpcClassicLinkDnsSupportRequest method.
    +//    req, resp := client.DisableVpcClassicLinkDnsSupportRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLinkDnsSupportInput) (req *request.Request, output *DisableVpcClassicLinkDnsSupportOutput) {
    +	op := &request.Operation{
    +		Name:       opDisableVpcClassicLinkDnsSupport,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DisableVpcClassicLinkDnsSupportInput{}
    +	}
    +
    +	// NOTE(review): output is nil at the newRequest call; the response
    +	// struct is allocated below and attached via req.Data — do not reorder.
    +	req = c.newRequest(op, input, output)
    +	output = &DisableVpcClassicLinkDnsSupportOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DisableVpcClassicLinkDnsSupport API operation for Amazon Elastic Compute Cloud.
    +//
    +// Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve
    +// to public IP addresses when addressed between a linked EC2-Classic instance
    +// and instances in the VPC to which it's linked. For more information about
    +// ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DisableVpcClassicLinkDnsSupport for usage and error information.
    +func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSupportInput) (*DisableVpcClassicLinkDnsSupportOutput, error) {
    +	req, out := c.DisableVpcClassicLinkDnsSupportRequest(input)
    +	// out is req.Data; Send populates it on success.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDisassociateAddress = "DisassociateAddress"
    +
    +// DisassociateAddressRequest generates a "aws/request.Request" representing the
    +// client's request for the DisassociateAddress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DisassociateAddress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DisassociateAddress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DisassociateAddressRequest method.
    +//    req, resp := client.DisassociateAddressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req *request.Request, output *DisassociateAddressOutput) {
    +	op := &request.Operation{
    +		Name:       opDisassociateAddress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DisassociateAddressInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the ec2query unmarshaler for the discard-body handler — this
    +	// operation's response is not decoded into the output struct.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DisassociateAddressOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DisassociateAddress API operation for Amazon Elastic Compute Cloud.
    +//
    +// Disassociates an Elastic IP address from the instance or network interface
    +// it's associated with.
    +//
    +// An Elastic IP address is for use in either the EC2-Classic platform or in
    +// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// This is an idempotent operation. If you perform the operation more than once,
    +// Amazon EC2 doesn't return an error.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DisassociateAddress for usage and error information.
    +func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*DisassociateAddressOutput, error) {
    +	req, out := c.DisassociateAddressRequest(input)
    +	// out is req.Data; the response body is discarded for this operation.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDisassociateRouteTable = "DisassociateRouteTable"
    +
    +// DisassociateRouteTableRequest generates a "aws/request.Request" representing the
    +// client's request for the DisassociateRouteTable operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DisassociateRouteTable for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DisassociateRouteTable method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DisassociateRouteTableRequest method.
    +//    req, resp := client.DisassociateRouteTableRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) (req *request.Request, output *DisassociateRouteTableOutput) {
    +	op := &request.Operation{
    +		Name:       opDisassociateRouteTable,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// Substitute an empty input so newRequest never sees a nil params value.
    +	if input == nil {
    +		input = &DisassociateRouteTableInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	// Swap the ec2query unmarshaler for the discard-body handler — this
    +	// operation's response is not decoded into the output struct.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &DisassociateRouteTableOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DisassociateRouteTable API operation for Amazon Elastic Compute Cloud.
    +//
    +// Disassociates a subnet from a route table.
    +//
    +// After you perform this action, the subnet no longer uses the routes in the
    +// route table. Instead, it uses the routes in the VPC's main route table. For
    +// more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation DisassociateRouteTable for usage and error information.
    +func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*DisassociateRouteTableOutput, error) {
    +	req, out := c.DisassociateRouteTableRequest(input)
    +	// out is req.Data; the response body is discarded for this operation.
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation"
    +
    +// EnableVgwRoutePropagationRequest generates a "aws/request.Request" representing the
    +// client's request for the EnableVgwRoutePropagation operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See EnableVgwRoutePropagation for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the EnableVgwRoutePropagation method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the EnableVgwRoutePropagationRequest method.
    +//    req, resp := client.EnableVgwRoutePropagationRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationInput) (req *request.Request, output *EnableVgwRoutePropagationOutput) {
    +	op := &request.Operation{
    +		Name:       opEnableVgwRoutePropagation,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &EnableVgwRoutePropagationInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	// NOTE(review): the default ec2query unmarshaler is swapped for a
    +	// discard-body handler, presumably because this operation returns no
    +	// response payload — confirm against the EC2 API model.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &EnableVgwRoutePropagationOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// EnableVgwRoutePropagation API operation for Amazon Elastic Compute Cloud.
    +//
    +// Enables a virtual private gateway (VGW) to propagate routes to the specified
    +// route table of a VPC.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation EnableVgwRoutePropagation for usage and error information.
    +func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) (*EnableVgwRoutePropagationOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.EnableVgwRoutePropagationRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opEnableVolumeIO is the API operation name used as the request Operation's Name.
    +const opEnableVolumeIO = "EnableVolumeIO"
    +
    +// EnableVolumeIORequest generates a "aws/request.Request" representing the
    +// client's request for the EnableVolumeIO operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See EnableVolumeIO for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the EnableVolumeIO method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the EnableVolumeIORequest method.
    +//    req, resp := client.EnableVolumeIORequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Request, output *EnableVolumeIOOutput) {
    +	op := &request.Operation{
    +		Name:       opEnableVolumeIO,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &EnableVolumeIOInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	// NOTE(review): the default ec2query unmarshaler is swapped for a
    +	// discard-body handler, presumably because this operation returns no
    +	// response payload — confirm against the EC2 API model.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &EnableVolumeIOOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// EnableVolumeIO API operation for Amazon Elastic Compute Cloud.
    +//
    +// Enables I/O operations for a volume that had I/O operations disabled because
    +// the data on the volume was potentially inconsistent.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation EnableVolumeIO for usage and error information.
    +func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.EnableVolumeIORequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opEnableVpcClassicLink is the API operation name used as the request Operation's Name.
    +const opEnableVpcClassicLink = "EnableVpcClassicLink"
    +
    +// EnableVpcClassicLinkRequest generates a "aws/request.Request" representing the
    +// client's request for the EnableVpcClassicLink operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See EnableVpcClassicLink for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the EnableVpcClassicLink method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the EnableVpcClassicLinkRequest method.
    +//    req, resp := client.EnableVpcClassicLinkRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req *request.Request, output *EnableVpcClassicLinkOutput) {
    +	op := &request.Operation{
    +		Name:       opEnableVpcClassicLink,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &EnableVpcClassicLinkInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &EnableVpcClassicLinkOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// EnableVpcClassicLink API operation for Amazon Elastic Compute Cloud.
    +//
    +// Enables a VPC for ClassicLink. You can then link EC2-Classic instances to
    +// your ClassicLink-enabled VPC to allow communication over private IP addresses.
    +// You cannot enable your VPC for ClassicLink if any of your VPC's route tables
    +// have existing routes for address ranges within the 10.0.0.0/8 IP address
    +// range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16
    +// IP address ranges. For more information, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation EnableVpcClassicLink for usage and error information.
    +func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpcClassicLinkOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.EnableVpcClassicLinkRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opEnableVpcClassicLinkDnsSupport is the API operation name used as the request Operation's Name.
    +const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport"
    +
    +// EnableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the
    +// client's request for the EnableVpcClassicLinkDnsSupport operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See EnableVpcClassicLinkDnsSupport for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the EnableVpcClassicLinkDnsSupport method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the EnableVpcClassicLinkDnsSupportRequest method.
    +//    req, resp := client.EnableVpcClassicLinkDnsSupportRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkDnsSupportInput) (req *request.Request, output *EnableVpcClassicLinkDnsSupportOutput) {
    +	op := &request.Operation{
    +		Name:       opEnableVpcClassicLinkDnsSupport,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &EnableVpcClassicLinkDnsSupportInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &EnableVpcClassicLinkDnsSupportOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// EnableVpcClassicLinkDnsSupport API operation for Amazon Elastic Compute Cloud.
    +//
    +// Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled,
    +// the DNS hostname of a linked EC2-Classic instance resolves to its private
    +// IP address when addressed from an instance in the VPC to which it's linked.
    +// Similarly, the DNS hostname of an instance in a VPC resolves to its private
    +// IP address when addressed from a linked EC2-Classic instance. For more information
    +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation EnableVpcClassicLinkDnsSupport for usage and error information.
    +func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSupportInput) (*EnableVpcClassicLinkDnsSupportOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.EnableVpcClassicLinkDnsSupportRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opGetConsoleOutput is the API operation name used as the request Operation's Name.
    +const opGetConsoleOutput = "GetConsoleOutput"
    +
    +// GetConsoleOutputRequest generates a "aws/request.Request" representing the
    +// client's request for the GetConsoleOutput operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetConsoleOutput for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetConsoleOutput method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetConsoleOutputRequest method.
    +//    req, resp := client.GetConsoleOutputRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *request.Request, output *GetConsoleOutputOutput) {
    +	op := &request.Operation{
    +		Name:       opGetConsoleOutput,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetConsoleOutputInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &GetConsoleOutputOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetConsoleOutput API operation for Amazon Elastic Compute Cloud.
    +//
    +// Gets the console output for the specified instance.
    +//
    +// Instances do not have a physical monitor through which you can view their
    +// console output. They also lack physical controls that allow you to power
    +// up, reboot, or shut them down. To allow these actions, we provide them through
    +// the Amazon EC2 API and command line interface.
    +//
    +// Instance console output is buffered and posted shortly after instance boot,
    +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output
    +// which is available for at least one hour after the most recent post.
    +//
    +// For Linux instances, the instance console output displays the exact console
    +// output that would normally be displayed on a physical monitor attached to
    +// a computer. This output is buffered because the instance produces it and
    +// then posts it to a store where the instance's owner can retrieve it.
    +//
    +// For Windows instances, the instance console output includes output from the
    +// EC2Config service.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation GetConsoleOutput for usage and error information.
    +func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.GetConsoleOutputRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opGetConsoleScreenshot is the API operation name used as the request Operation's Name.
    +const opGetConsoleScreenshot = "GetConsoleScreenshot"
    +
    +// GetConsoleScreenshotRequest generates a "aws/request.Request" representing the
    +// client's request for the GetConsoleScreenshot operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetConsoleScreenshot for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetConsoleScreenshot method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetConsoleScreenshotRequest method.
    +//    req, resp := client.GetConsoleScreenshotRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req *request.Request, output *GetConsoleScreenshotOutput) {
    +	op := &request.Operation{
    +		Name:       opGetConsoleScreenshot,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetConsoleScreenshotInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &GetConsoleScreenshotOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetConsoleScreenshot API operation for Amazon Elastic Compute Cloud.
    +//
    +// Retrieve a JPG-format screenshot of a running instance to help with troubleshooting.
    +//
    +// The returned content is Base64-encoded.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation GetConsoleScreenshot for usage and error information.
    +func (c *EC2) GetConsoleScreenshot(input *GetConsoleScreenshotInput) (*GetConsoleScreenshotOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.GetConsoleScreenshotRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opGetHostReservationPurchasePreview is the API operation name used as the request Operation's Name.
    +const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview"
    +
    +// GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the
    +// client's request for the GetHostReservationPurchasePreview operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetHostReservationPurchasePreview for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetHostReservationPurchasePreview method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetHostReservationPurchasePreviewRequest method.
    +//    req, resp := client.GetHostReservationPurchasePreviewRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) GetHostReservationPurchasePreviewRequest(input *GetHostReservationPurchasePreviewInput) (req *request.Request, output *GetHostReservationPurchasePreviewOutput) {
    +	op := &request.Operation{
    +		Name:       opGetHostReservationPurchasePreview,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetHostReservationPurchasePreviewInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &GetHostReservationPurchasePreviewOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetHostReservationPurchasePreview API operation for Amazon Elastic Compute Cloud.
    +//
    +// Preview a reservation purchase with configurations that match those of your
    +// Dedicated Host. You must have active Dedicated Hosts in your account before
    +// you purchase a reservation.
    +//
    +// This is a preview of the PurchaseHostReservation action and does not result
    +// in the offering being purchased.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation GetHostReservationPurchasePreview for usage and error information.
    +func (c *EC2) GetHostReservationPurchasePreview(input *GetHostReservationPurchasePreviewInput) (*GetHostReservationPurchasePreviewOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.GetHostReservationPurchasePreviewRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opGetPasswordData is the API operation name used as the request Operation's Name.
    +const opGetPasswordData = "GetPasswordData"
    +
    +// GetPasswordDataRequest generates a "aws/request.Request" representing the
    +// client's request for the GetPasswordData operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetPasswordData for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetPasswordData method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetPasswordDataRequest method.
    +//    req, resp := client.GetPasswordDataRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.Request, output *GetPasswordDataOutput) {
    +	op := &request.Operation{
    +		Name:       opGetPasswordData,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetPasswordDataInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &GetPasswordDataOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetPasswordData API operation for Amazon Elastic Compute Cloud.
    +//
    +// Retrieves the encrypted administrator password for an instance running Windows.
    +//
    +// The Windows password is generated at boot if the EC2Config service plugin,
    +// Ec2SetPassword, is enabled. This usually only happens the first time an AMI
    +// is launched, and then Ec2SetPassword is automatically disabled. The password
    +// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before
    +// bundling.
    +//
    +// The password is encrypted using the key pair that you specified when you
    +// launched the instance. You must provide the corresponding key pair file.
    +//
    +// Password generation and encryption takes a few moments. We recommend that
    +// you wait up to 15 minutes after launching an instance before trying to retrieve
    +// the generated password.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation GetPasswordData for usage and error information.
    +func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.GetPasswordDataRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opGetReservedInstancesExchangeQuote is the API operation name used as the request Operation's Name.
    +const opGetReservedInstancesExchangeQuote = "GetReservedInstancesExchangeQuote"
    +
    +// GetReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the
    +// client's request for the GetReservedInstancesExchangeQuote operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetReservedInstancesExchangeQuote for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetReservedInstancesExchangeQuote method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetReservedInstancesExchangeQuoteRequest method.
    +//    req, resp := client.GetReservedInstancesExchangeQuoteRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) GetReservedInstancesExchangeQuoteRequest(input *GetReservedInstancesExchangeQuoteInput) (req *request.Request, output *GetReservedInstancesExchangeQuoteOutput) {
    +	op := &request.Operation{
    +		Name:       opGetReservedInstancesExchangeQuote,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetReservedInstancesExchangeQuoteInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &GetReservedInstancesExchangeQuoteOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetReservedInstancesExchangeQuote API operation for Amazon Elastic Compute Cloud.
    +//
    +// Returns details about the values and term of your specified Convertible Reserved
    +// Instances. When an offering ID is specified it returns information about
    +// whether the exchange is valid and can be performed.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation GetReservedInstancesExchangeQuote for usage and error information.
    +func (c *EC2) GetReservedInstancesExchangeQuote(input *GetReservedInstancesExchangeQuoteInput) (*GetReservedInstancesExchangeQuoteOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.GetReservedInstancesExchangeQuoteRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opImportImage is the API operation name used as the request Operation's Name.
    +const opImportImage = "ImportImage"
    +
    +// ImportImageRequest generates a "aws/request.Request" representing the
    +// client's request for the ImportImage operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ImportImage for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ImportImage method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ImportImageRequest method.
    +//    req, resp := client.ImportImageRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, output *ImportImageOutput) {
    +	op := &request.Operation{
    +		Name:       opImportImage,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ImportImageInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &ImportImageOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ImportImage API operation for Amazon Elastic Compute Cloud.
    +//
    +// Import single or multi-volume disk images or EBS snapshots into an Amazon
    +// Machine Image (AMI). For more information, see Importing a VM as an Image
    +// Using VM Import/Export (http://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html)
    +// in the VM Import/Export User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ImportImage for usage and error information.
    +func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.ImportImageRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opImportInstance is the API operation name used as the request Operation's Name.
    +const opImportInstance = "ImportInstance"
    +
    +// ImportInstanceRequest generates a "aws/request.Request" representing the
    +// client's request for the ImportInstance operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ImportInstance for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ImportInstance method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ImportInstanceRequest method.
    +//    req, resp := client.ImportInstanceRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Request, output *ImportInstanceOutput) {
    +	op := &request.Operation{
    +		Name:       opImportInstance,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ImportInstanceInput{}
    +	}
    +
    +	// output is still nil when handed to newRequest; it is allocated below
    +	// and attached to the request via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &ImportInstanceOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ImportInstance API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates an import instance task using metadata from the specified disk image.
    +// ImportInstance only supports single-volume VMs. To import multi-volume VMs,
    +// use ImportImage. For more information, see Importing a Virtual Machine Using
    +// the Amazon EC2 CLI (http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ec2-cli-vmimport-export.html).
    +//
    +// For information about the import manifest referenced by this API action,
    +// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ImportInstance for usage and error information.
    +func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, error) {
    +	// Build the request object and send it synchronously; the typed output
    +	// is returned together with any error from Send.
    +	req, out := c.ImportInstanceRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +// opImportKeyPair is the API operation name used as the request Operation's Name.
    +const opImportKeyPair = "ImportKeyPair"
    +
    +// ImportKeyPairRequest generates a "aws/request.Request" representing the
    +// client's request for the ImportKeyPair operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ImportKeyPair for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ImportKeyPair method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ImportKeyPairRequest method.
    +//    req, resp := client.ImportKeyPairRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) {
    +	op := &request.Operation{
    +		Name:       opImportKeyPair,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ImportKeyPairInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &ImportKeyPairOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ImportKeyPair API operation for Amazon Elastic Compute Cloud.
    +//
    +// Imports the public key from an RSA key pair that you created with a third-party
    +// tool. Compare this with CreateKeyPair, in which AWS creates the key pair
    +// and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair,
    +// you create the key pair and give AWS just the public key. The private key
    +// is never transferred between you and AWS.
    +//
    +// For more information about key pairs, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ImportKeyPair for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ImportKeyPairRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) {
    +	req, out := c.ImportKeyPairRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opImportSnapshot = "ImportSnapshot"
    +
    +// ImportSnapshotRequest generates a "aws/request.Request" representing the
    +// client's request for the ImportSnapshot operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ImportSnapshot for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ImportSnapshot method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ImportSnapshotRequest method.
    +//    req, resp := client.ImportSnapshotRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Request, output *ImportSnapshotOutput) {
    +	op := &request.Operation{
    +		Name:       opImportSnapshot,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ImportSnapshotInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &ImportSnapshotOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ImportSnapshot API operation for Amazon Elastic Compute Cloud.
    +//
    +// Imports a disk into an EBS snapshot.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ImportSnapshot for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ImportSnapshotRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, error) {
    +	req, out := c.ImportSnapshotRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opImportVolume = "ImportVolume"
    +
    +// ImportVolumeRequest generates a "aws/request.Request" representing the
    +// client's request for the ImportVolume operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ImportVolume for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ImportVolume method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ImportVolumeRequest method.
    +//    req, resp := client.ImportVolumeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Request, output *ImportVolumeOutput) {
    +	op := &request.Operation{
    +		Name:       opImportVolume,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ImportVolumeInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &ImportVolumeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ImportVolume API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates an import volume task using metadata from the specified disk image.For
    +// more information, see Importing Disks to Amazon EBS (http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/importing-your-volumes-into-amazon-ebs.html).
    +//
    +// For information about the import manifest referenced by this API action,
    +// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ImportVolume for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ImportVolumeRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error) {
    +	req, out := c.ImportVolumeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyHosts = "ModifyHosts"
    +
    +// ModifyHostsRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyHosts operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyHosts for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyHosts method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyHostsRequest method.
    +//    req, resp := client.ModifyHostsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, output *ModifyHostsOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyHosts,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyHostsInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &ModifyHostsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyHosts API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modify the auto-placement setting of a Dedicated Host. When auto-placement
    +// is enabled, AWS will place instances that you launch with a tenancy of host,
    +// but without targeting a specific host ID, onto any available Dedicated Host
    +// in your account which has auto-placement enabled. When auto-placement is
    +// disabled, you need to provide a host ID if you want the instance to launch
    +// onto a specific host. If no host ID is provided, the instance will be launched
    +// onto a suitable host which has auto-placement enabled.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyHosts for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyHostsRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyHosts(input *ModifyHostsInput) (*ModifyHostsOutput, error) {
    +	req, out := c.ModifyHostsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyIdFormat = "ModifyIdFormat"
    +
    +// ModifyIdFormatRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyIdFormat operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyIdFormat for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyIdFormat method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyIdFormatRequest method.
    +//    req, resp := client.ModifyIdFormatRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Request, output *ModifyIdFormatOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyIdFormat,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyIdFormatInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	// Replace the default ec2query unmarshal handler with one that discards
    +	// the response body (presumably this operation returns no payload to
    +	// decode — confirm against the EC2 API model).
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifyIdFormatOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyIdFormat API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the ID format for the specified resource on a per-region basis.
    +// You can specify that resources should receive longer IDs (17-character IDs)
    +// when they are created. The following resource types support longer IDs: instance
    +// | reservation | snapshot | volume.
    +//
    +// This setting applies to the IAM user who makes the request; it does not apply
    +// to the entire AWS account. By default, an IAM user defaults to the same settings
    +// as the root user. If you're using this action as the root user, then these
    +// settings apply to the entire account, unless an IAM user explicitly overrides
    +// these settings for themselves. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Resources created with longer IDs are visible to all IAM roles and users,
    +// regardless of these settings and provided that they have permission to use
    +// the relevant Describe command for the resource type.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyIdFormat for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyIdFormatRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) {
    +	req, out := c.ModifyIdFormatRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyIdentityIdFormat = "ModifyIdentityIdFormat"
    +
    +// ModifyIdentityIdFormatRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyIdentityIdFormat operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyIdentityIdFormat for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyIdentityIdFormat method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyIdentityIdFormatRequest method.
    +//    req, resp := client.ModifyIdentityIdFormatRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyIdentityIdFormatRequest(input *ModifyIdentityIdFormatInput) (req *request.Request, output *ModifyIdentityIdFormatOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyIdentityIdFormat,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyIdentityIdFormatInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	// Replace the default ec2query unmarshal handler with one that discards
    +	// the response body (presumably this operation returns no payload to
    +	// decode — confirm against the EC2 API model).
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifyIdentityIdFormatOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyIdentityIdFormat API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the ID format of a resource for a specified IAM user, IAM role,
    +// or the root user for an account; or all IAM users, IAM roles, and the root
    +// user for an account. You can specify that resources should receive longer
    +// IDs (17-character IDs) when they are created.
    +//
    +// The following resource types support longer IDs: instance | reservation |
    +// snapshot | volume. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// This setting applies to the principal specified in the request; it does not
    +// apply to the principal that makes the request.
    +//
    +// Resources created with longer IDs are visible to all IAM roles and users,
    +// regardless of these settings and provided that they have permission to use
    +// the relevant Describe command for the resource type.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyIdentityIdFormat for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyIdentityIdFormatRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyIdentityIdFormat(input *ModifyIdentityIdFormatInput) (*ModifyIdentityIdFormatOutput, error) {
    +	req, out := c.ModifyIdentityIdFormatRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyImageAttribute = "ModifyImageAttribute"
    +
    +// ModifyImageAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyImageAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyImageAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyImageAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyImageAttributeRequest method.
    +//    req, resp := client.ModifyImageAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req *request.Request, output *ModifyImageAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyImageAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyImageAttributeInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	// Replace the default ec2query unmarshal handler with one that discards
    +	// the response body (presumably this operation returns no payload to
    +	// decode — confirm against the EC2 API model).
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifyImageAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyImageAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the specified attribute of the specified AMI. You can specify only
    +// one attribute at a time.
    +//
    +// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace
    +// product code cannot be made public.
    +//
    +// The SriovNetSupport enhanced networking attribute cannot be changed using
    +// this command. Instead, enable SriovNetSupport on an instance and create an
    +// AMI from the instance. This will result in an image with SriovNetSupport
    +// enabled.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyImageAttribute for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyImageAttributeRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) {
    +	req, out := c.ModifyImageAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyInstanceAttribute = "ModifyInstanceAttribute"
    +
    +// ModifyInstanceAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyInstanceAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyInstanceAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyInstanceAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyInstanceAttributeRequest method.
    +//    req, resp := client.ModifyInstanceAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput) (req *request.Request, output *ModifyInstanceAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyInstanceAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyInstanceAttributeInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	// Replace the default ec2query unmarshal handler with one that discards
    +	// the response body (presumably this operation returns no payload to
    +	// decode — confirm against the EC2 API model).
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifyInstanceAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyInstanceAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the specified attribute of the specified instance. You can specify
    +// only one attribute at a time.
    +//
    +// To modify some attributes, the instance must be stopped. For more information,
    +// see Modifying Attributes of a Stopped Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyInstanceAttribute for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyInstanceAttributeRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*ModifyInstanceAttributeOutput, error) {
    +	req, out := c.ModifyInstanceAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyInstancePlacement = "ModifyInstancePlacement"
    +
    +// ModifyInstancePlacementRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyInstancePlacement operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyInstancePlacement for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyInstancePlacement method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyInstancePlacementRequest method.
    +//    req, resp := client.ModifyInstancePlacementRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput) (req *request.Request, output *ModifyInstancePlacementOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyInstancePlacement,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyInstancePlacementInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &ModifyInstancePlacementOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyInstancePlacement API operation for Amazon Elastic Compute Cloud.
    +//
    +// Set the instance affinity value for a specific stopped instance and modify
    +// the instance tenancy setting.
    +//
    +// Instance affinity is disabled by default. When instance affinity is host
    +// and it is not associated with a specific Dedicated Host, the next time it
    +// is launched it will automatically be associated with the host it lands on.
    +// This relationship will persist if the instance is stopped/started, or rebooted.
    +//
    +// You can modify the host ID associated with a stopped instance. If a stopped
    +// instance has a new host ID association, the instance will target that host
    +// when restarted.
    +//
    +// You can modify the tenancy of a stopped instance with a tenancy of host or
    +// dedicated.
    +//
    +// Affinity, hostID, and tenancy are not required parameters, but at least one
    +// of them must be specified in the request. Affinity and tenancy can be modified
    +// in the same request, but tenancy can only be modified on instances that are
    +// stopped.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyInstancePlacement for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyInstancePlacementRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyInstancePlacement(input *ModifyInstancePlacementInput) (*ModifyInstancePlacementOutput, error) {
    +	req, out := c.ModifyInstancePlacementRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute"
    +
    +// ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyNetworkInterfaceAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyNetworkInterfaceAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyNetworkInterfaceAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyNetworkInterfaceAttributeRequest method.
    +//    req, resp := client.ModifyNetworkInterfaceAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfaceAttributeInput) (req *request.Request, output *ModifyNetworkInterfaceAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyNetworkInterfaceAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyNetworkInterfaceAttributeInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	// Replace the default ec2query unmarshal handler with one that discards
    +	// the response body (presumably this operation returns no payload to
    +	// decode — confirm against the EC2 API model).
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifyNetworkInterfaceAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyNetworkInterfaceAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the specified network interface attribute. You can specify only
    +// one attribute at a time.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyNetworkInterfaceAttribute for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyNetworkInterfaceAttributeRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttributeInput) (*ModifyNetworkInterfaceAttributeOutput, error) {
    +	req, out := c.ModifyNetworkInterfaceAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyReservedInstances = "ModifyReservedInstances"
    +
    +// ModifyReservedInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyReservedInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyReservedInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyReservedInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyReservedInstancesRequest method.
    +//    req, resp := client.ModifyReservedInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput) (req *request.Request, output *ModifyReservedInstancesOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyReservedInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyReservedInstancesInput{}
    +	}
    +
    +	// output (a named return) is still nil at this call; the concrete
    +	// output struct is allocated below and attached via req.Data.
    +	req = c.newRequest(op, input, output)
    +	output = &ModifyReservedInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyReservedInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the Availability Zone, instance count, instance type, or network
    +// platform (EC2-Classic or EC2-VPC) of your Standard Reserved Instances. The
    +// Reserved Instances to be modified must be identical, except for Availability
    +// Zone, network platform, and instance type.
    +//
    +// For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyReservedInstances for usage and error information.
    +//
    +// This call is synchronous: it builds the request via ModifyReservedInstancesRequest
    +// and invokes Send, returning the output and any request error.
    +func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*ModifyReservedInstancesOutput, error) {
    +	req, out := c.ModifyReservedInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifySnapshotAttribute = "ModifySnapshotAttribute"
    +
    +// ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifySnapshotAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifySnapshotAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifySnapshotAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifySnapshotAttributeRequest method.
    +//    req, resp := client.ModifySnapshotAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput) (req *request.Request, output *ModifySnapshotAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opModifySnapshotAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifySnapshotAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifySnapshotAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifySnapshotAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Adds or removes permission settings for the specified snapshot. You may add
    +// or remove specified AWS account IDs from a snapshot's list of create volume
    +// permissions, but you cannot do both in a single API call. If you need to
    +// both add and remove account IDs for a snapshot, you must use multiple API
    +// calls.
    +//
    +// Encrypted snapshots and snapshots with AWS Marketplace product codes cannot
    +// be made public. Snapshots encrypted with your default CMK cannot be shared
    +// with other accounts.
    +//
    +// For more information on modifying snapshot permissions, see Sharing Snapshots
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifySnapshotAttribute for usage and error information.
    +func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) {
    +	req, out := c.ModifySnapshotAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifySpotFleetRequest = "ModifySpotFleetRequest"
    +
    +// ModifySpotFleetRequestRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifySpotFleetRequest operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifySpotFleetRequest for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifySpotFleetRequest method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifySpotFleetRequestRequest method.
    +//    req, resp := client.ModifySpotFleetRequestRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) {
    +	op := &request.Operation{
    +		Name:       opModifySpotFleetRequest,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifySpotFleetRequestInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &ModifySpotFleetRequestOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifySpotFleetRequest API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the specified Spot fleet request.
    +//
    +// While the Spot fleet request is being modified, it is in the modifying state.
    +//
    +// To scale up your Spot fleet, increase its target capacity. The Spot fleet
    +// launches the additional Spot instances according to the allocation strategy
    +// for the Spot fleet request. If the allocation strategy is lowestPrice, the
    +// Spot fleet launches instances using the Spot pool with the lowest price.
    +// If the allocation strategy is diversified, the Spot fleet distributes the
    +// instances across the Spot pools.
    +//
    +// To scale down your Spot fleet, decrease its target capacity. First, the Spot
    +// fleet cancels any open bids that exceed the new target capacity. You can
    +// request that the Spot fleet terminate Spot instances until the size of the
    +// fleet no longer exceeds the new target capacity. If the allocation strategy
    +// is lowestPrice, the Spot fleet terminates the instances with the highest
    +// price per unit. If the allocation strategy is diversified, the Spot fleet
    +// terminates instances across the Spot pools. Alternatively, you can request
    +// that the Spot fleet keep the fleet at its current size, but not replace any
    +// Spot instances that are interrupted or that you terminate manually.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifySpotFleetRequest for usage and error information.
    +func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) {
    +	req, out := c.ModifySpotFleetRequestRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifySubnetAttribute = "ModifySubnetAttribute"
    +
    +// ModifySubnetAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifySubnetAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifySubnetAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifySubnetAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifySubnetAttributeRequest method.
    +//    req, resp := client.ModifySubnetAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (req *request.Request, output *ModifySubnetAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opModifySubnetAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifySubnetAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifySubnetAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifySubnetAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies a subnet attribute.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifySubnetAttribute for usage and error information.
    +func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifySubnetAttributeOutput, error) {
    +	req, out := c.ModifySubnetAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyVolumeAttribute = "ModifyVolumeAttribute"
    +
    +// ModifyVolumeAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyVolumeAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyVolumeAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyVolumeAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyVolumeAttributeRequest method.
    +//    req, resp := client.ModifyVolumeAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (req *request.Request, output *ModifyVolumeAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyVolumeAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyVolumeAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifyVolumeAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyVolumeAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies a volume attribute.
    +//
    +// By default, all I/O operations for the volume are suspended when the data
    +// on the volume is determined to be potentially inconsistent, to prevent undetectable,
    +// latent data corruption. The I/O access to the volume can be resumed by first
    +// enabling I/O access and then checking the data consistency on your volume.
    +//
    +// You can change the default behavior to resume I/O operations. We recommend
    +// that you change this only for boot volumes or for volumes that are stateless
    +// or disposable.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyVolumeAttribute for usage and error information.
    +func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyVolumeAttributeOutput, error) {
    +	req, out := c.ModifyVolumeAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyVpcAttribute = "ModifyVpcAttribute"
    +
    +// ModifyVpcAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyVpcAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyVpcAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyVpcAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyVpcAttributeRequest method.
    +//    req, resp := client.ModifyVpcAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *request.Request, output *ModifyVpcAttributeOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyVpcAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyVpcAttributeInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ModifyVpcAttributeOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyVpcAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the specified attribute of the specified VPC.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyVpcAttribute for usage and error information.
    +func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttributeOutput, error) {
    +	req, out := c.ModifyVpcAttributeRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyVpcEndpoint = "ModifyVpcEndpoint"
    +
    +// ModifyVpcEndpointRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyVpcEndpoint operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyVpcEndpoint for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyVpcEndpoint method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyVpcEndpointRequest method.
    +//    req, resp := client.ModifyVpcEndpointRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *request.Request, output *ModifyVpcEndpointOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyVpcEndpoint,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyVpcEndpointInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &ModifyVpcEndpointOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyVpcEndpoint API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies attributes of a specified VPC endpoint. You can modify the policy
    +// associated with the endpoint, and you can add and remove route tables associated
    +// with the endpoint.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyVpcEndpoint for usage and error information.
    +func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpointOutput, error) {
    +	req, out := c.ModifyVpcEndpointRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions"
    +
    +// ModifyVpcPeeringConnectionOptionsRequest generates a "aws/request.Request" representing the
    +// client's request for the ModifyVpcPeeringConnectionOptions operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ModifyVpcPeeringConnectionOptions for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ModifyVpcPeeringConnectionOptions method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ModifyVpcPeeringConnectionOptionsRequest method.
    +//    req, resp := client.ModifyVpcPeeringConnectionOptionsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringConnectionOptionsInput) (req *request.Request, output *ModifyVpcPeeringConnectionOptionsOutput) {
    +	op := &request.Operation{
    +		Name:       opModifyVpcPeeringConnectionOptions,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ModifyVpcPeeringConnectionOptionsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &ModifyVpcPeeringConnectionOptionsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ModifyVpcPeeringConnectionOptions API operation for Amazon Elastic Compute Cloud.
    +//
    +// Modifies the VPC peering connection options on one side of a VPC peering
    +// connection. You can do the following:
    +//
    +//    * Enable/disable communication over the peering connection between an
    +//    EC2-Classic instance that's linked to your VPC (using ClassicLink) and
    +//    instances in the peer VPC.
    +//
    +//    * Enable/disable communication over the peering connection between instances
    +//    in your VPC and an EC2-Classic instance that's linked to the peer VPC.
    +//
    +//    * Enable/disable a local VPC to resolve public DNS hostnames to private
    +//    IP addresses when queried from instances in the peer VPC.
    +//
    +// If the peered VPCs are in different accounts, each owner must initiate a
    +// separate request to modify the peering connection options, depending on whether
    +// their VPC was the requester or accepter for the VPC peering connection. If
    +// the peered VPCs are in the same account, you can modify the requester and
    +// accepter options in the same request. To confirm which VPC is the accepter
    +// and requester for a VPC peering connection, use the DescribeVpcPeeringConnections
    +// command.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ModifyVpcPeeringConnectionOptions for usage and error information.
    +func (c *EC2) ModifyVpcPeeringConnectionOptions(input *ModifyVpcPeeringConnectionOptionsInput) (*ModifyVpcPeeringConnectionOptionsOutput, error) {
    +	req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opMonitorInstances = "MonitorInstances"
    +
    +// MonitorInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the MonitorInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See MonitorInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the MonitorInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the MonitorInstancesRequest method.
    +//    req, resp := client.MonitorInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *request.Request, output *MonitorInstancesOutput) {
    +	op := &request.Operation{
    +		Name:       opMonitorInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &MonitorInstancesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &MonitorInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// MonitorInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Enables monitoring for a running instance. For more information about monitoring
    +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation MonitorInstances for usage and error information.
    +func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesOutput, error) {
    +	req, out := c.MonitorInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opMoveAddressToVpc = "MoveAddressToVpc"
    +
    +// MoveAddressToVpcRequest generates a "aws/request.Request" representing the
    +// client's request for the MoveAddressToVpc operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See MoveAddressToVpc for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the MoveAddressToVpc method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the MoveAddressToVpcRequest method.
    +//    req, resp := client.MoveAddressToVpcRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *request.Request, output *MoveAddressToVpcOutput) {
    +	op := &request.Operation{
    +		Name:       opMoveAddressToVpc,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &MoveAddressToVpcInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &MoveAddressToVpcOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// MoveAddressToVpc API operation for Amazon Elastic Compute Cloud.
    +//
    +// Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC
    +// platform. The Elastic IP address must be allocated to your account for more
    +// than 24 hours, and it must not be associated with an instance. After the
    +// Elastic IP address is moved, it is no longer available for use in the EC2-Classic
    +// platform, unless you move it back using the RestoreAddressToClassic request.
    +// You cannot move an Elastic IP address that was originally allocated for use
    +// in the EC2-VPC platform to the EC2-Classic platform.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation MoveAddressToVpc for usage and error information.
    +func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) {
    +	req, out := c.MoveAddressToVpcRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opPurchaseHostReservation = "PurchaseHostReservation"
    +
    +// PurchaseHostReservationRequest generates a "aws/request.Request" representing the
    +// client's request for the PurchaseHostReservation operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See PurchaseHostReservation for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the PurchaseHostReservation method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the PurchaseHostReservationRequest method.
    +//    req, resp := client.PurchaseHostReservationRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) PurchaseHostReservationRequest(input *PurchaseHostReservationInput) (req *request.Request, output *PurchaseHostReservationOutput) {
    +	op := &request.Operation{
    +		Name:       opPurchaseHostReservation,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &PurchaseHostReservationInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &PurchaseHostReservationOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// PurchaseHostReservation API operation for Amazon Elastic Compute Cloud.
    +//
    +// Purchase a reservation with configurations that match those of your Dedicated
    +// Host. You must have active Dedicated Hosts in your account before you purchase
    +// a reservation. This action results in the specified reservation being purchased
    +// and charged to your account.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation PurchaseHostReservation for usage and error information.
    +func (c *EC2) PurchaseHostReservation(input *PurchaseHostReservationInput) (*PurchaseHostReservationOutput, error) {
    +	req, out := c.PurchaseHostReservationRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering"
    +
    +// PurchaseReservedInstancesOfferingRequest generates a "aws/request.Request" representing the
    +// client's request for the PurchaseReservedInstancesOffering operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See PurchaseReservedInstancesOffering for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the PurchaseReservedInstancesOffering method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the PurchaseReservedInstancesOfferingRequest method.
    +//    req, resp := client.PurchaseReservedInstancesOfferingRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedInstancesOfferingInput) (req *request.Request, output *PurchaseReservedInstancesOfferingOutput) {
    +	op := &request.Operation{
    +		Name:       opPurchaseReservedInstancesOffering,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &PurchaseReservedInstancesOfferingInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &PurchaseReservedInstancesOfferingOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// PurchaseReservedInstancesOffering API operation for Amazon Elastic Compute Cloud.
    +//
    +// Purchases a Reserved Instance for use with your account. With Reserved Instances,
    +// you pay a lower hourly rate compared to On-Demand instance pricing.
    +//
    +// Use DescribeReservedInstancesOfferings to get a list of Reserved Instance
    +// offerings that match your specifications. After you've purchased a Reserved
    +// Instance, you can check for your new Reserved Instance with DescribeReservedInstances.
    +//
    +// For more information, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html)
    +// and Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation PurchaseReservedInstancesOffering for usage and error information.
    +func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstancesOfferingInput) (*PurchaseReservedInstancesOfferingOutput, error) {
    +	req, out := c.PurchaseReservedInstancesOfferingRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opPurchaseScheduledInstances = "PurchaseScheduledInstances"
    +
    +// PurchaseScheduledInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the PurchaseScheduledInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See PurchaseScheduledInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the PurchaseScheduledInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the PurchaseScheduledInstancesRequest method.
    +//    req, resp := client.PurchaseScheduledInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstancesInput) (req *request.Request, output *PurchaseScheduledInstancesOutput) {
    +	op := &request.Operation{
    +		Name:       opPurchaseScheduledInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &PurchaseScheduledInstancesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &PurchaseScheduledInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// PurchaseScheduledInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Purchases one or more Scheduled Instances with the specified schedule.
    +//
    +// Scheduled Instances enable you to purchase Amazon EC2 compute capacity by
    +// the hour for a one-year term. Before you can purchase a Scheduled Instance,
    +// you must call DescribeScheduledInstanceAvailability to check for available
    +// schedules and obtain a purchase token. After you purchase a Scheduled Instance,
    +// you must call RunScheduledInstances during each scheduled time period.
    +//
    +// After you purchase a Scheduled Instance, you can't cancel, modify, or resell
    +// your purchase.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation PurchaseScheduledInstances for usage and error information.
    +func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) {
    +	req, out := c.PurchaseScheduledInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opRebootInstances = "RebootInstances"
    +
    +// RebootInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the RebootInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RebootInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RebootInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RebootInstancesRequest method.
    +//    req, resp := client.RebootInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.Request, output *RebootInstancesOutput) {
    +	op := &request.Operation{
    +		Name:       opRebootInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &RebootInstancesInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &RebootInstancesOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// RebootInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Requests a reboot of one or more instances. This operation is asynchronous;
    +// it only queues a request to reboot the specified instances. The operation
    +// succeeds if the instances are valid and belong to you. Requests to reboot
    +// terminated instances are ignored.
    +//
    +// If an instance does not cleanly shut down within four minutes, Amazon EC2
    +// performs a hard reboot.
    +//
    +// For more information about troubleshooting, see Getting Console Output and
    +// Rebooting Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RebootInstances for usage and error information.
    +func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutput, error) {
    +	req, out := c.RebootInstancesRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opRegisterImage = "RegisterImage"
    +
    +// RegisterImageRequest generates a "aws/request.Request" representing the
    +// client's request for the RegisterImage operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RegisterImage for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RegisterImage method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RegisterImageRequest method.
    +//    req, resp := client.RegisterImageRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Request, output *RegisterImageOutput) {
    +	op := &request.Operation{
    +		Name:       opRegisterImage,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &RegisterImageInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &RegisterImageOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// RegisterImage API operation for Amazon Elastic Compute Cloud.
    +//
    +// Registers an AMI. When you're creating an AMI, this is the final step you
    +// must complete before you can launch an instance from the AMI. For more information
    +// about creating AMIs, see Creating Your Own AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// For Amazon EBS-backed instances, CreateImage creates and registers the AMI
    +// in a single request, so you don't have to register the AMI yourself.
    +//
    +// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from
    +// a snapshot of a root device volume. For more information, see Launching an
    +// Instance from a Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_LaunchingInstanceFromSnapshot.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE
    +// Linux Enterprise Server (SLES), use the EC2 billingProduct code associated
    +// with an AMI to verify subscription status for package updates. Creating an
    +// AMI from an EBS snapshot does not maintain this billing code, and subsequent
    +// instances launched from such an AMI will not be able to connect to package
    +// update infrastructure.
    +//
    +// Similarly, although you can create a Windows AMI from a snapshot, you can't
    +// successfully launch an instance from the AMI.
    +//
    +// To create Windows AMIs or to create AMIs for Linux operating systems that
    +// must retain AMI billing codes to work properly, see CreateImage.
    +//
    +// If needed, you can deregister an AMI at any time. Any modifications you make
    +// to an AMI backed by an instance store volume invalidates its registration.
    +// If you make changes to an image, deregister the previous image and register
    +// the new image.
    +//
    +// You can't register an image where a secondary (non-root) snapshot has AWS
    +// Marketplace product codes.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RegisterImage for usage and error information.
    +func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) {
    +	req, out := c.RegisterImageRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection"
    +
    +// RejectVpcPeeringConnectionRequest generates a "aws/request.Request" representing the
    +// client's request for the RejectVpcPeeringConnection operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RejectVpcPeeringConnection for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RejectVpcPeeringConnection method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RejectVpcPeeringConnectionRequest method.
    +//    req, resp := client.RejectVpcPeeringConnectionRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectionInput) (req *request.Request, output *RejectVpcPeeringConnectionOutput) {
    +	op := &request.Operation{
    +		Name:       opRejectVpcPeeringConnection,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &RejectVpcPeeringConnectionInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &RejectVpcPeeringConnectionOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// RejectVpcPeeringConnection API operation for Amazon Elastic Compute Cloud.
    +//
    +// Rejects a VPC peering connection request. The VPC peering connection must
    +// be in the pending-acceptance state. Use the DescribeVpcPeeringConnections
    +// request to view your outstanding VPC peering connection requests. To delete
    +// an active VPC peering connection, or to delete a VPC peering connection request
    +// that you initiated, use DeleteVpcPeeringConnection.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RejectVpcPeeringConnection for usage and error information.
    +func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) (*RejectVpcPeeringConnectionOutput, error) {
    +	req, out := c.RejectVpcPeeringConnectionRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opReleaseAddress = "ReleaseAddress"
    +
    +// ReleaseAddressRequest generates a "aws/request.Request" representing the
    +// client's request for the ReleaseAddress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ReleaseAddress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ReleaseAddress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ReleaseAddressRequest method.
    +//    req, resp := client.ReleaseAddressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Request, output *ReleaseAddressOutput) {
    +	op := &request.Operation{
    +		Name:       opReleaseAddress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ReleaseAddressInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ReleaseAddressOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ReleaseAddress API operation for Amazon Elastic Compute Cloud.
    +//
    +// Releases the specified Elastic IP address.
    +//
    +// After releasing an Elastic IP address, it is released to the IP address pool
    +// and might be unavailable to you. Be sure to update your DNS records and any
    +// servers or devices that communicate with the address. If you attempt to release
    +// an Elastic IP address that you already released, you'll get an AuthFailure
    +// error if the address is already allocated to another AWS account.
    +//
    +// [EC2-Classic, default VPC] Releasing an Elastic IP address automatically
    +// disassociates it from any instance that it's associated with. To disassociate
    +// an Elastic IP address without releasing it, use DisassociateAddress.
    +//
    +// [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic
    +// IP address before you try to release it. Otherwise, Amazon EC2 returns an
    +// error (InvalidIPAddress.InUse).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ReleaseAddress for usage and error information.
    +func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, error) {
    +	req, out := c.ReleaseAddressRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opReleaseHosts = "ReleaseHosts"
    +
    +// ReleaseHostsRequest generates a "aws/request.Request" representing the
    +// client's request for the ReleaseHosts operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ReleaseHosts for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ReleaseHosts method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ReleaseHostsRequest method.
    +//    req, resp := client.ReleaseHostsRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Request, output *ReleaseHostsOutput) {
    +	op := &request.Operation{
    +		Name:       opReleaseHosts,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ReleaseHostsInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &ReleaseHostsOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ReleaseHosts API operation for Amazon Elastic Compute Cloud.
    +//
    +// When you no longer want to use an On-Demand Dedicated Host, it can be released.
    +// On-Demand billing is stopped and the host goes into released state. The host
    +// ID of Dedicated Hosts that have been released can no longer be specified
    +// in another request, e.g., ModifyHosts. You must stop or terminate all instances
    +// on a host before it can be released.
    +//
    +// When Dedicated Hosts are released, it may take some time for them to stop
    +// counting toward your limit and you may receive capacity errors when trying
    +// to allocate new Dedicated Hosts. Try waiting a few minutes, and then try
    +// again.
    +//
    +// Released hosts will still appear in a DescribeHosts response.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ReleaseHosts for usage and error information.
    +func (c *EC2) ReleaseHosts(input *ReleaseHostsInput) (*ReleaseHostsOutput, error) {
    +	req, out := c.ReleaseHostsRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation"
    +
    +// ReplaceNetworkAclAssociationRequest generates a "aws/request.Request" representing the
    +// client's request for the ReplaceNetworkAclAssociation operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ReplaceNetworkAclAssociation for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ReplaceNetworkAclAssociation method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ReplaceNetworkAclAssociationRequest method.
    +//    req, resp := client.ReplaceNetworkAclAssociationRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssociationInput) (req *request.Request, output *ReplaceNetworkAclAssociationOutput) {
    +	op := &request.Operation{
    +		Name:       opReplaceNetworkAclAssociation,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ReplaceNetworkAclAssociationInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &ReplaceNetworkAclAssociationOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ReplaceNetworkAclAssociation API operation for Amazon Elastic Compute Cloud.
    +//
    +// Changes which network ACL a subnet is associated with. By default when you
    +// create a subnet, it's automatically associated with the default network ACL.
    +// For more information about network ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ReplaceNetworkAclAssociation for usage and error information.
    +func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationInput) (*ReplaceNetworkAclAssociationOutput, error) {
    +	req, out := c.ReplaceNetworkAclAssociationRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry"
    +
    +// ReplaceNetworkAclEntryRequest generates a "aws/request.Request" representing the
    +// client's request for the ReplaceNetworkAclEntry operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ReplaceNetworkAclEntry for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ReplaceNetworkAclEntry method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ReplaceNetworkAclEntryRequest method.
    +//    req, resp := client.ReplaceNetworkAclEntryRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) (req *request.Request, output *ReplaceNetworkAclEntryOutput) {
    +	op := &request.Operation{
    +		Name:       opReplaceNetworkAclEntry,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ReplaceNetworkAclEntryInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ReplaceNetworkAclEntryOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ReplaceNetworkAclEntry API operation for Amazon Elastic Compute Cloud.
    +//
    +// Replaces an entry (rule) in a network ACL. For more information about network
    +// ACLs, see Network ACLs (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ReplaceNetworkAclEntry for usage and error information.
    +func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*ReplaceNetworkAclEntryOutput, error) {
    +	req, out := c.ReplaceNetworkAclEntryRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opReplaceRoute = "ReplaceRoute"
    +
    +// ReplaceRouteRequest generates a "aws/request.Request" representing the
    +// client's request for the ReplaceRoute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ReplaceRoute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ReplaceRoute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ReplaceRouteRequest method.
    +//    req, resp := client.ReplaceRouteRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Request, output *ReplaceRouteOutput) {
    +	op := &request.Operation{
    +		Name:       opReplaceRoute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &ReplaceRouteInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ReplaceRouteOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// ReplaceRoute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Replaces an existing route within a route table in a VPC. You must provide
    +// only one of the following: Internet gateway or virtual private gateway, NAT
    +// instance, NAT gateway, VPC peering connection, or network interface.
    +//
    +// For more information about route tables, see Route Tables (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ReplaceRoute for usage and error information.
    +func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error) {
    +	req, out := c.ReplaceRouteRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation"
    +
    +// ReplaceRouteTableAssociationRequest generates a "aws/request.Request" representing the
    +// client's request for the ReplaceRouteTableAssociation operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ReplaceRouteTableAssociation for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ReplaceRouteTableAssociation method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ReplaceRouteTableAssociationRequest method.
    +//    req, resp := client.ReplaceRouteTableAssociationRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssociationInput) (req *request.Request, output *ReplaceRouteTableAssociationOutput) {
    +	if input == nil {
    +		input = &ReplaceRouteTableAssociationInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opReplaceRouteTableAssociation,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// The output container is attached after construction via req.Data,
    +	// matching the SDK's generated request wiring.
    +	req = c.newRequest(operation, input, output)
    +	output = &ReplaceRouteTableAssociationOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// ReplaceRouteTableAssociation API operation for Amazon Elastic Compute Cloud.
    +//
    +// Changes the route table associated with a given subnet in a VPC. After the
    +// operation completes, the subnet uses the routes in the new route table it's
    +// associated with. For more information about route tables, see Route Tables
    +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// You can also use ReplaceRouteTableAssociation to change which table is the
    +// main route table in the VPC. You just specify the main route table's association
    +// ID and the route table to be the new main route table.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ReplaceRouteTableAssociation for usage and error information.
    +func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationInput) (*ReplaceRouteTableAssociationOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.ReplaceRouteTableAssociationRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opReportInstanceStatus is the wire-level name of the
    +// ReportInstanceStatus API operation.
    +const opReportInstanceStatus = "ReportInstanceStatus"
    +
    +// ReportInstanceStatusRequest generates a "aws/request.Request" representing the
    +// client's request for the ReportInstanceStatus operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ReportInstanceStatus for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ReportInstanceStatus method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ReportInstanceStatusRequest method.
    +//    req, resp := client.ReportInstanceStatusRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req *request.Request, output *ReportInstanceStatusOutput) {
    +	if input == nil {
    +		input = &ReportInstanceStatusInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opReportInstanceStatus,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(operation, input, output)
    +	// Replace the default ec2query unmarshal handler with one that
    +	// discards the response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ReportInstanceStatusOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// ReportInstanceStatus API operation for Amazon Elastic Compute Cloud.
    +//
    +// Submits feedback about the status of an instance. The instance must be in
    +// the running state. If your experience with the instance differs from the
    +// instance status returned by DescribeInstanceStatus, use ReportInstanceStatus
    +// to report your experience with the instance. Amazon EC2 collects this information
    +// to improve the accuracy of status checks.
    +//
    +// Use of this action does not change the value returned by DescribeInstanceStatus.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ReportInstanceStatus for usage and error information.
    +func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportInstanceStatusOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.ReportInstanceStatusRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opRequestSpotFleet is the wire-level name of the RequestSpotFleet
    +// API operation.
    +const opRequestSpotFleet = "RequestSpotFleet"
    +
    +// RequestSpotFleetRequest generates a "aws/request.Request" representing the
    +// client's request for the RequestSpotFleet operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RequestSpotFleet for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RequestSpotFleet method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RequestSpotFleetRequest method.
    +//    req, resp := client.RequestSpotFleetRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *request.Request, output *RequestSpotFleetOutput) {
    +	if input == nil {
    +		input = &RequestSpotFleetInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opRequestSpotFleet,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// The output container is attached after construction via req.Data,
    +	// matching the SDK's generated request wiring.
    +	req = c.newRequest(operation, input, output)
    +	output = &RequestSpotFleetOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// RequestSpotFleet API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a Spot fleet request.
    +//
    +// You can submit a single request that includes multiple launch specifications
    +// that vary by instance type, AMI, Availability Zone, or subnet.
    +//
    +// By default, the Spot fleet requests Spot instances in the Spot pool where
    +// the price per unit is the lowest. Each launch specification can include its
    +// own instance weighting that reflects the value of the instance type to your
    +// application workload.
    +//
    +// Alternatively, you can specify that the Spot fleet distribute the target
    +// capacity across the Spot pools included in its launch specifications. By
    +// ensuring that the Spot instances in your Spot fleet are in different Spot
    +// pools, you can improve the availability of your fleet.
    +//
    +// For more information, see Spot Fleet Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RequestSpotFleet for usage and error information.
    +func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.RequestSpotFleetRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opRequestSpotInstances is the wire-level name of the
    +// RequestSpotInstances API operation.
    +const opRequestSpotInstances = "RequestSpotInstances"
    +
    +// RequestSpotInstancesRequest generates a "aws/request.Request" representing the
    +// client's request for the RequestSpotInstances operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RequestSpotInstances for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RequestSpotInstances method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RequestSpotInstancesRequest method.
    +//    req, resp := client.RequestSpotInstancesRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req *request.Request, output *RequestSpotInstancesOutput) {
    +	if input == nil {
    +		input = &RequestSpotInstancesInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opRequestSpotInstances,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// The output container is attached after construction via req.Data,
    +	// matching the SDK's generated request wiring.
    +	req = c.newRequest(operation, input, output)
    +	output = &RequestSpotInstancesOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// RequestSpotInstances API operation for Amazon Elastic Compute Cloud.
    +//
    +// Creates a Spot instance request. Spot instances are instances that Amazon
    +// EC2 launches when the bid price that you specify exceeds the current Spot
    +// price. Amazon EC2 periodically sets the Spot price based on available Spot
    +// Instance capacity and current Spot instance requests. For more information,
    +// see Spot Instance Requests (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RequestSpotInstances for usage and error information.
    +func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSpotInstancesOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.RequestSpotInstancesRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opResetImageAttribute is the wire-level name of the
    +// ResetImageAttribute API operation.
    +const opResetImageAttribute = "ResetImageAttribute"
    +
    +// ResetImageAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ResetImageAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ResetImageAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ResetImageAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ResetImageAttributeRequest method.
    +//    req, resp := client.ResetImageAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req *request.Request, output *ResetImageAttributeOutput) {
    +	if input == nil {
    +		input = &ResetImageAttributeInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opResetImageAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(operation, input, output)
    +	// Replace the default ec2query unmarshal handler with one that
    +	// discards the response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ResetImageAttributeOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// ResetImageAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Resets an attribute of an AMI to its default value.
    +//
    +// The productCodes attribute can't be reset.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ResetImageAttribute for usage and error information.
    +func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageAttributeOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.ResetImageAttributeRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opResetInstanceAttribute is the wire-level name of the
    +// ResetInstanceAttribute API operation.
    +const opResetInstanceAttribute = "ResetInstanceAttribute"
    +
    +// ResetInstanceAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ResetInstanceAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ResetInstanceAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ResetInstanceAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ResetInstanceAttributeRequest method.
    +//    req, resp := client.ResetInstanceAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) (req *request.Request, output *ResetInstanceAttributeOutput) {
    +	if input == nil {
    +		input = &ResetInstanceAttributeInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opResetInstanceAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(operation, input, output)
    +	// Replace the default ec2query unmarshal handler with one that
    +	// discards the response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ResetInstanceAttributeOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// ResetInstanceAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Resets an attribute of an instance to its default value. To reset the kernel
    +// or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck,
    +// the instance can be either running or stopped.
    +//
    +// The sourceDestCheck attribute controls whether source/destination checking
    +// is enabled. The default value is true, which means checking is enabled. This
    +// value must be false for a NAT instance to perform NAT. For more information,
    +// see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html)
    +// in the Amazon Virtual Private Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ResetInstanceAttribute for usage and error information.
    +func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*ResetInstanceAttributeOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.ResetInstanceAttributeRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opResetNetworkInterfaceAttribute is the wire-level name of the
    +// ResetNetworkInterfaceAttribute API operation.
    +const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute"
    +
    +// ResetNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ResetNetworkInterfaceAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ResetNetworkInterfaceAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ResetNetworkInterfaceAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ResetNetworkInterfaceAttributeRequest method.
    +//    req, resp := client.ResetNetworkInterfaceAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterfaceAttributeInput) (req *request.Request, output *ResetNetworkInterfaceAttributeOutput) {
    +	if input == nil {
    +		input = &ResetNetworkInterfaceAttributeInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opResetNetworkInterfaceAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(operation, input, output)
    +	// Replace the default ec2query unmarshal handler with one that
    +	// discards the response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ResetNetworkInterfaceAttributeOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// ResetNetworkInterfaceAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Resets a network interface attribute. You can specify only one attribute
    +// at a time.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ResetNetworkInterfaceAttribute for usage and error information.
    +func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttributeInput) (*ResetNetworkInterfaceAttributeOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.ResetNetworkInterfaceAttributeRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opResetSnapshotAttribute is the wire-level name of the
    +// ResetSnapshotAttribute API operation.
    +const opResetSnapshotAttribute = "ResetSnapshotAttribute"
    +
    +// ResetSnapshotAttributeRequest generates a "aws/request.Request" representing the
    +// client's request for the ResetSnapshotAttribute operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See ResetSnapshotAttribute for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the ResetSnapshotAttribute method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the ResetSnapshotAttributeRequest method.
    +//    req, resp := client.ResetSnapshotAttributeRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) (req *request.Request, output *ResetSnapshotAttributeOutput) {
    +	if input == nil {
    +		input = &ResetSnapshotAttributeInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opResetSnapshotAttribute,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(operation, input, output)
    +	// Replace the default ec2query unmarshal handler with one that
    +	// discards the response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &ResetSnapshotAttributeOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// ResetSnapshotAttribute API operation for Amazon Elastic Compute Cloud.
    +//
    +// Resets permission settings for the specified snapshot.
    +//
    +// For more information on modifying snapshot permissions, see Sharing Snapshots
    +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html)
    +// in the Amazon Elastic Compute Cloud User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation ResetSnapshotAttribute for usage and error information.
    +func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*ResetSnapshotAttributeOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.ResetSnapshotAttributeRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opRestoreAddressToClassic is the wire-level name of the
    +// RestoreAddressToClassic API operation.
    +const opRestoreAddressToClassic = "RestoreAddressToClassic"
    +
    +// RestoreAddressToClassicRequest generates a "aws/request.Request" representing the
    +// client's request for the RestoreAddressToClassic operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RestoreAddressToClassic for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RestoreAddressToClassic method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RestoreAddressToClassicRequest method.
    +//    req, resp := client.RestoreAddressToClassicRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput) (req *request.Request, output *RestoreAddressToClassicOutput) {
    +	if input == nil {
    +		input = &RestoreAddressToClassicInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opRestoreAddressToClassic,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	// The output container is attached after construction via req.Data,
    +	// matching the SDK's generated request wiring.
    +	req = c.newRequest(operation, input, output)
    +	output = &RestoreAddressToClassicOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// RestoreAddressToClassic API operation for Amazon Elastic Compute Cloud.
    +//
    +// Restores an Elastic IP address that was previously moved to the EC2-VPC platform
    +// back to the EC2-Classic platform. You cannot move an Elastic IP address that
    +// was originally allocated for use in EC2-VPC. The Elastic IP address must
    +// not be associated with an instance or network interface.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RestoreAddressToClassic for usage and error information.
    +func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.RestoreAddressToClassicRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opRevokeSecurityGroupEgress is the wire-level name of the
    +// RevokeSecurityGroupEgress API operation.
    +const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress"
    +
    +// RevokeSecurityGroupEgressRequest generates a "aws/request.Request" representing the
    +// client's request for the RevokeSecurityGroupEgress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RevokeSecurityGroupEgress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RevokeSecurityGroupEgress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RevokeSecurityGroupEgressRequest method.
    +//    req, resp := client.RevokeSecurityGroupEgressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressInput) (req *request.Request, output *RevokeSecurityGroupEgressOutput) {
    +	if input == nil {
    +		input = &RevokeSecurityGroupEgressInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opRevokeSecurityGroupEgress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(operation, input, output)
    +	// Replace the default ec2query unmarshal handler with one that
    +	// discards the response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &RevokeSecurityGroupEgressOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// RevokeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud.
    +//
    +// [EC2-VPC only] Removes one or more egress rules from a security group for
    +// EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic.
    +// The values that you specify in the revoke request (for example, ports) must
    +// match the existing rule's values for the rule to be revoked.
    +//
    +// Each rule consists of the protocol and the CIDR range or source security
    +// group. For the TCP and UDP protocols, you must also specify the destination
    +// port or range of ports. For the ICMP protocol, you must also specify the
    +// ICMP type and code.
    +//
    +// Rule changes are propagated to instances within the security group as quickly
    +// as possible. However, a small delay might occur.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RevokeSecurityGroupEgress for usage and error information.
    +func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) (*RevokeSecurityGroupEgressOutput, error) {
    +	// One-shot helper: build the request, send it, and hand back the
    +	// unmarshaled output together with any send error.
    +	req, out := c.RevokeSecurityGroupEgressRequest(input)
    +	return out, req.Send()
    +}
    +
    +// opRevokeSecurityGroupIngress is the wire-level name of the
    +// RevokeSecurityGroupIngress API operation.
    +const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress"
    +
    +// RevokeSecurityGroupIngressRequest generates a "aws/request.Request" representing the
    +// client's request for the RevokeSecurityGroupIngress operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See RevokeSecurityGroupIngress for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the RevokeSecurityGroupIngress method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the RevokeSecurityGroupIngressRequest method.
    +//    req, resp := client.RevokeSecurityGroupIngressRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngressInput) (req *request.Request, output *RevokeSecurityGroupIngressOutput) {
    +	if input == nil {
    +		input = &RevokeSecurityGroupIngressInput{}
    +	}
    +
    +	operation := &request.Operation{
    +		Name:       opRevokeSecurityGroupIngress,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	req = c.newRequest(operation, input, output)
    +	// Replace the default ec2query unmarshal handler with one that
    +	// discards the response body.
    +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
    +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    +	output = &RevokeSecurityGroupIngressOutput{}
    +	req.Data = output
    +	return req, output
    +}
    +
    +// RevokeSecurityGroupIngress API operation for Amazon Elastic Compute Cloud.
    +//
    +// Removes one or more ingress rules from a security group. The values that
    +// you specify in the revoke request (for example, ports) must match the existing
    +// rule's values for the rule to be removed.
    +//
    +// Each rule consists of the protocol and the CIDR range or source security
    +// group. For the TCP and UDP protocols, you must also specify the destination
    +// port or range of ports. For the ICMP protocol, you must also specify the
    +// ICMP type and code.
    +//
    +// Rule changes are propagated to instances within the security group as quickly
    +// as possible. However, a small delay might occur.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
    +// API operation RevokeSecurityGroupIngress for usage and error information.
    +func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) (*RevokeSecurityGroupIngressOutput, error) {
    +	req, out := c.RevokeSecurityGroupIngressRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
     +// opRunInstances is the operation name set on the request.Operation below.
     +const opRunInstances = "RunInstances"
     +
     +// RunInstancesRequest generates a "aws/request.Request" representing the
     +// client's request for the RunInstances operation. The "output" return
     +// value can be used to capture response data after the request's "Send" method
     +// is called.
     +//
     +// See RunInstances for usage and error information.
     +//
     +// Creating a request object using this method should be used when you want to inject
     +// custom logic into the request's lifecycle using a custom handler, or if you want to
     +// access properties on the request object before or after sending the request. If
     +// you just want the service response, call the RunInstances method directly
     +// instead.
     +//
     +// Note: You must call the "Send" method on the returned request object in order
     +// to execute the request.
     +//
     +//    // Example sending a request using the RunInstancesRequest method.
     +//    req, resp := client.RunInstancesRequest(params)
     +//
     +//    err := req.Send()
     +//    if err == nil { // resp is now filled
     +//        fmt.Println(resp)
     +//    }
     +//
     +func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Request, output *Reservation) {
     +	op := &request.Operation{
     +		Name:       opRunInstances,
     +		HTTPMethod: "POST",
     +		HTTPPath:   "/",
     +	}
     +
     +	if input == nil {
     +		input = &RunInstancesInput{}
     +	}
     +
     +	// NOTE(review): output is still nil at this call; it is attached to the
     +	// request via req.Data below — standard generated-code ordering, left as-is.
     +	req = c.newRequest(op, input, output)
     +	output = &Reservation{}
     +	req.Data = output
     +	return
     +}
     +
     +// RunInstances API operation for Amazon Elastic Compute Cloud.
     +//
     +// Launches the specified number of instances using an AMI for which you have
     +// permissions.
     +//
     +// When you launch an instance, it enters the pending state. After the instance
     +// is ready for you, it enters the running state. To check the state of your
     +// instance, call DescribeInstances.
     +//
     +// To ensure faster instance launches, break up large requests into smaller
     +// batches. For example, create five separate launch requests for 100 instances
     +// each instead of one launch request for 500 instances.
     +//
     +// To tag your instance, ensure that it is running as CreateTags requires a
     +// resource ID. For more information about tagging, see Tagging Your Amazon
     +// EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html).
     +//
     +// If you don't specify a security group when launching an instance, Amazon
     +// EC2 uses the default security group. For more information, see Security Groups
     +// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// [EC2-VPC only accounts] If you don't specify a subnet in the request, we
     +// choose a default subnet from your default VPC for you.
     +//
     +// [EC2-Classic accounts] If you're launching into EC2-Classic and you don't
     +// specify an Availability Zone, we choose one for you.
     +//
     +// Linux instances have access to the public key of the key pair at boot. You
     +// can use this key to provide secure access to the instance. Amazon EC2 public
     +// images use this feature to provide secure access without passwords. For more
     +// information, see Key Pairs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// You can provide optional user data when launching an instance. For more information,
     +// see Instance Metadata (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// If any of the AMIs have a product code attached for which the user has not
     +// subscribed, RunInstances fails.
     +//
     +// Some instance types can only be launched into a VPC. If you do not have a
     +// default VPC, or if you do not specify a subnet ID in the request, RunInstances
     +// fails. For more information, see Instance Types Available Only in a VPC (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types).
     +//
     +// For more information about troubleshooting, see What To Do If An Instance
     +// Immediately Terminates (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html),
     +// and Troubleshooting Connecting to Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
     +// with awserr.Error's Code and Message methods to get detailed information about
     +// the error.
     +//
     +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
     +// API operation RunInstances for usage and error information.
     +func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) {
     +	req, out := c.RunInstancesRequest(input)
     +	err := req.Send()
     +	return out, err
     +}
    +
     +// opRunScheduledInstances is the operation name set on the request.Operation below.
     +const opRunScheduledInstances = "RunScheduledInstances"
     +
     +// RunScheduledInstancesRequest generates a "aws/request.Request" representing the
     +// client's request for the RunScheduledInstances operation. The "output" return
     +// value can be used to capture response data after the request's "Send" method
     +// is called.
     +//
     +// See RunScheduledInstances for usage and error information.
     +//
     +// Creating a request object using this method should be used when you want to inject
     +// custom logic into the request's lifecycle using a custom handler, or if you want to
     +// access properties on the request object before or after sending the request. If
     +// you just want the service response, call the RunScheduledInstances method directly
     +// instead.
     +//
     +// Note: You must call the "Send" method on the returned request object in order
     +// to execute the request.
     +//
     +//    // Example sending a request using the RunScheduledInstancesRequest method.
     +//    req, resp := client.RunScheduledInstancesRequest(params)
     +//
     +//    err := req.Send()
     +//    if err == nil { // resp is now filled
     +//        fmt.Println(resp)
     +//    }
     +//
     +func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (req *request.Request, output *RunScheduledInstancesOutput) {
     +	op := &request.Operation{
     +		Name:       opRunScheduledInstances,
     +		HTTPMethod: "POST",
     +		HTTPPath:   "/",
     +	}
     +
     +	if input == nil {
     +		input = &RunScheduledInstancesInput{}
     +	}
     +
     +	// NOTE(review): output is still nil at this call; it is attached to the
     +	// request via req.Data below — standard generated-code ordering, left as-is.
     +	req = c.newRequest(op, input, output)
     +	output = &RunScheduledInstancesOutput{}
     +	req.Data = output
     +	return
     +}
     +
     +// RunScheduledInstances API operation for Amazon Elastic Compute Cloud.
     +//
     +// Launches the specified Scheduled Instances.
     +//
     +// Before you can launch a Scheduled Instance, you must purchase it and obtain
     +// an identifier using PurchaseScheduledInstances.
     +//
     +// You must launch a Scheduled Instance during its scheduled time period. You
     +// can't stop or reboot a Scheduled Instance, but you can terminate it as needed.
     +// If you terminate a Scheduled Instance before the current scheduled time period
     +// ends, you can launch it again after a few minutes. For more information,
     +// see Scheduled Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-scheduled-instances.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
     +// with awserr.Error's Code and Message methods to get detailed information about
     +// the error.
     +//
     +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
     +// API operation RunScheduledInstances for usage and error information.
     +func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) {
     +	req, out := c.RunScheduledInstancesRequest(input)
     +	err := req.Send()
     +	return out, err
     +}
    +
     +// opStartInstances is the operation name set on the request.Operation below.
     +const opStartInstances = "StartInstances"
     +
     +// StartInstancesRequest generates a "aws/request.Request" representing the
     +// client's request for the StartInstances operation. The "output" return
     +// value can be used to capture response data after the request's "Send" method
     +// is called.
     +//
     +// See StartInstances for usage and error information.
     +//
     +// Creating a request object using this method should be used when you want to inject
     +// custom logic into the request's lifecycle using a custom handler, or if you want to
     +// access properties on the request object before or after sending the request. If
     +// you just want the service response, call the StartInstances method directly
     +// instead.
     +//
     +// Note: You must call the "Send" method on the returned request object in order
     +// to execute the request.
     +//
     +//    // Example sending a request using the StartInstancesRequest method.
     +//    req, resp := client.StartInstancesRequest(params)
     +//
     +//    err := req.Send()
     +//    if err == nil { // resp is now filled
     +//        fmt.Println(resp)
     +//    }
     +//
     +func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Request, output *StartInstancesOutput) {
     +	op := &request.Operation{
     +		Name:       opStartInstances,
     +		HTTPMethod: "POST",
     +		HTTPPath:   "/",
     +	}
     +
     +	if input == nil {
     +		input = &StartInstancesInput{}
     +	}
     +
     +	// NOTE(review): output is still nil at this call; it is attached to the
     +	// request via req.Data below — standard generated-code ordering, left as-is.
     +	req = c.newRequest(op, input, output)
     +	output = &StartInstancesOutput{}
     +	req.Data = output
     +	return
     +}
     +
     +// StartInstances API operation for Amazon Elastic Compute Cloud.
     +//
     +// Starts an Amazon EBS-backed AMI that you've previously stopped.
     +//
     +// Instances that use Amazon EBS volumes as their root devices can be quickly
     +// stopped and started. When an instance is stopped, the compute resources are
     +// released and you are not billed for hourly instance usage. However, your
     +// root partition Amazon EBS volume remains, continues to persist your data,
     +// and you are charged for Amazon EBS volume usage. You can restart your instance
     +// at any time. Each time you transition an instance from stopped to started,
     +// Amazon EC2 charges a full instance hour, even if transitions happen multiple
     +// times within a single hour.
     +//
     +// Before stopping an instance, make sure it is in a state from which it can
     +// be restarted. Stopping an instance does not preserve data stored in RAM.
     +//
     +// Performing this operation on an instance that uses an instance store as its
     +// root device returns an error.
     +//
     +// For more information, see Stopping Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
     +// with awserr.Error's Code and Message methods to get detailed information about
     +// the error.
     +//
     +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
     +// API operation StartInstances for usage and error information.
     +func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, error) {
     +	req, out := c.StartInstancesRequest(input)
     +	err := req.Send()
     +	return out, err
     +}
    +
     +// opStopInstances is the operation name set on the request.Operation below.
     +const opStopInstances = "StopInstances"
     +
     +// StopInstancesRequest generates a "aws/request.Request" representing the
     +// client's request for the StopInstances operation. The "output" return
     +// value can be used to capture response data after the request's "Send" method
     +// is called.
     +//
     +// See StopInstances for usage and error information.
     +//
     +// Creating a request object using this method should be used when you want to inject
     +// custom logic into the request's lifecycle using a custom handler, or if you want to
     +// access properties on the request object before or after sending the request. If
     +// you just want the service response, call the StopInstances method directly
     +// instead.
     +//
     +// Note: You must call the "Send" method on the returned request object in order
     +// to execute the request.
     +//
     +//    // Example sending a request using the StopInstancesRequest method.
     +//    req, resp := client.StopInstancesRequest(params)
     +//
     +//    err := req.Send()
     +//    if err == nil { // resp is now filled
     +//        fmt.Println(resp)
     +//    }
     +//
     +func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Request, output *StopInstancesOutput) {
     +	op := &request.Operation{
     +		Name:       opStopInstances,
     +		HTTPMethod: "POST",
     +		HTTPPath:   "/",
     +	}
     +
     +	if input == nil {
     +		input = &StopInstancesInput{}
     +	}
     +
     +	// NOTE(review): output is still nil at this call; it is attached to the
     +	// request via req.Data below — standard generated-code ordering, left as-is.
     +	req = c.newRequest(op, input, output)
     +	output = &StopInstancesOutput{}
     +	req.Data = output
     +	return
     +}
     +
     +// StopInstances API operation for Amazon Elastic Compute Cloud.
     +//
     +// Stops an Amazon EBS-backed instance.
     +//
     +// We don't charge hourly usage for a stopped instance, or data transfer fees;
     +// however, your root partition Amazon EBS volume remains, continues to persist
     +// your data, and you are charged for Amazon EBS volume usage. Each time you
     +// transition an instance from stopped to started, Amazon EC2 charges a full
     +// instance hour, even if transitions happen multiple times within a single
     +// hour.
     +//
     +// You can't start or stop Spot instances, and you can't stop instance store-backed
     +// instances.
     +//
     +// When you stop an instance, we shut it down. You can restart your instance
     +// at any time. Before stopping an instance, make sure it is in a state from
     +// which it can be restarted. Stopping an instance does not preserve data stored
     +// in RAM.
     +//
     +// Stopping an instance is different to rebooting or terminating it. For example,
     +// when you stop an instance, the root device and any other devices attached
     +// to the instance persist. When you terminate an instance, the root device
     +// and any other devices attached during the instance launch are automatically
     +// deleted. For more information about the differences between rebooting, stopping,
     +// and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// When you stop an instance, we attempt to shut it down forcibly after a short
     +// while. If your instance appears stuck in the stopping state after a period
     +// of time, there may be an issue with the underlying host computer. For more
     +// information, see Troubleshooting Stopping Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
     +// with awserr.Error's Code and Message methods to get detailed information about
     +// the error.
     +//
     +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
     +// API operation StopInstances for usage and error information.
     +func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) {
     +	req, out := c.StopInstancesRequest(input)
     +	err := req.Send()
     +	return out, err
     +}
    +
     +// opTerminateInstances is the operation name set on the request.Operation below.
     +const opTerminateInstances = "TerminateInstances"
     +
     +// TerminateInstancesRequest generates a "aws/request.Request" representing the
     +// client's request for the TerminateInstances operation. The "output" return
     +// value can be used to capture response data after the request's "Send" method
     +// is called.
     +//
     +// See TerminateInstances for usage and error information.
     +//
     +// Creating a request object using this method should be used when you want to inject
     +// custom logic into the request's lifecycle using a custom handler, or if you want to
     +// access properties on the request object before or after sending the request. If
     +// you just want the service response, call the TerminateInstances method directly
     +// instead.
     +//
     +// Note: You must call the "Send" method on the returned request object in order
     +// to execute the request.
     +//
     +//    // Example sending a request using the TerminateInstancesRequest method.
     +//    req, resp := client.TerminateInstancesRequest(params)
     +//
     +//    err := req.Send()
     +//    if err == nil { // resp is now filled
     +//        fmt.Println(resp)
     +//    }
     +//
     +func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *request.Request, output *TerminateInstancesOutput) {
     +	op := &request.Operation{
     +		Name:       opTerminateInstances,
     +		HTTPMethod: "POST",
     +		HTTPPath:   "/",
     +	}
     +
     +	if input == nil {
     +		input = &TerminateInstancesInput{}
     +	}
     +
     +	// NOTE(review): output is still nil at this call; it is attached to the
     +	// request via req.Data below — standard generated-code ordering, left as-is.
     +	req = c.newRequest(op, input, output)
     +	output = &TerminateInstancesOutput{}
     +	req.Data = output
     +	return
     +}
     +
     +// TerminateInstances API operation for Amazon Elastic Compute Cloud.
     +//
     +// Shuts down one or more instances. This operation is idempotent; if you terminate
     +// an instance more than once, each call succeeds.
     +//
     +// If you specify multiple instances and the request fails (for example, because
     +// of a single incorrect instance ID), none of the instances are terminated.
     +//
     +// Terminated instances remain visible after termination (for approximately
     +// one hour).
     +//
     +// By default, Amazon EC2 deletes all EBS volumes that were attached when the
     +// instance launched. Volumes attached after instance launch continue running.
     +//
     +// You can stop, start, and terminate EBS-backed instances. You can only terminate
     +// instance store-backed instances. What happens to an instance differs if you
     +// stop it or terminate it. For example, when you stop an instance, the root
     +// device and any other devices attached to the instance persist. When you terminate
     +// an instance, any attached EBS volumes with the DeleteOnTermination block
     +// device mapping parameter set to true are automatically deleted. For more
     +// information about the differences between stopping and terminating instances,
     +// see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// For more information about troubleshooting, see Troubleshooting Terminating
     +// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
     +// with awserr.Error's Code and Message methods to get detailed information about
     +// the error.
     +//
     +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
     +// API operation TerminateInstances for usage and error information.
     +func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInstancesOutput, error) {
     +	req, out := c.TerminateInstancesRequest(input)
     +	err := req.Send()
     +	return out, err
     +}
    +
     +// opUnassignPrivateIpAddresses is the operation name set on the request.Operation below.
     +const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses"
     +
     +// UnassignPrivateIpAddressesRequest generates a "aws/request.Request" representing the
     +// client's request for the UnassignPrivateIpAddresses operation. The "output" return
     +// value can be used to capture response data after the request's "Send" method
     +// is called.
     +//
     +// See UnassignPrivateIpAddresses for usage and error information.
     +//
     +// Creating a request object using this method should be used when you want to inject
     +// custom logic into the request's lifecycle using a custom handler, or if you want to
     +// access properties on the request object before or after sending the request. If
     +// you just want the service response, call the UnassignPrivateIpAddresses method directly
     +// instead.
     +//
     +// Note: You must call the "Send" method on the returned request object in order
     +// to execute the request.
     +//
     +//    // Example sending a request using the UnassignPrivateIpAddressesRequest method.
     +//    req, resp := client.UnassignPrivateIpAddressesRequest(params)
     +//
     +//    err := req.Send()
     +//    if err == nil { // resp is now filled
     +//        fmt.Println(resp)
     +//    }
     +//
     +func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddressesInput) (req *request.Request, output *UnassignPrivateIpAddressesOutput) {
     +	op := &request.Operation{
     +		Name:       opUnassignPrivateIpAddresses,
     +		HTTPMethod: "POST",
     +		HTTPPath:   "/",
     +	}
     +
     +	if input == nil {
     +		input = &UnassignPrivateIpAddressesInput{}
     +	}
     +
     +	// NOTE(review): output is still nil at this call; it is attached to the
     +	// request via req.Data below — standard generated-code ordering, left as-is.
     +	req = c.newRequest(op, input, output)
     +	// Replace the default ec2query unmarshaler with a discard-body handler,
     +	// presumably because this operation's response carries no payload to decode.
     +	req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler)
     +	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
     +	output = &UnassignPrivateIpAddressesOutput{}
     +	req.Data = output
     +	return
     +}
     +
     +// UnassignPrivateIpAddresses API operation for Amazon Elastic Compute Cloud.
     +//
     +// Unassigns one or more secondary private IP addresses from a network interface.
     +//
     +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
     +// with awserr.Error's Code and Message methods to get detailed information about
     +// the error.
     +//
     +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
     +// API operation UnassignPrivateIpAddresses for usage and error information.
     +func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) (*UnassignPrivateIpAddressesOutput, error) {
     +	req, out := c.UnassignPrivateIpAddressesRequest(input)
     +	err := req.Send()
     +	return out, err
     +}
    +
     +// opUnmonitorInstances is the operation name set on the request.Operation below.
     +const opUnmonitorInstances = "UnmonitorInstances"
     +
     +// UnmonitorInstancesRequest generates a "aws/request.Request" representing the
     +// client's request for the UnmonitorInstances operation. The "output" return
     +// value can be used to capture response data after the request's "Send" method
     +// is called.
     +//
     +// See UnmonitorInstances for usage and error information.
     +//
     +// Creating a request object using this method should be used when you want to inject
     +// custom logic into the request's lifecycle using a custom handler, or if you want to
     +// access properties on the request object before or after sending the request. If
     +// you just want the service response, call the UnmonitorInstances method directly
     +// instead.
     +//
     +// Note: You must call the "Send" method on the returned request object in order
     +// to execute the request.
     +//
     +//    // Example sending a request using the UnmonitorInstancesRequest method.
     +//    req, resp := client.UnmonitorInstancesRequest(params)
     +//
     +//    err := req.Send()
     +//    if err == nil { // resp is now filled
     +//        fmt.Println(resp)
     +//    }
     +//
     +func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *request.Request, output *UnmonitorInstancesOutput) {
     +	op := &request.Operation{
     +		Name:       opUnmonitorInstances,
     +		HTTPMethod: "POST",
     +		HTTPPath:   "/",
     +	}
     +
     +	if input == nil {
     +		input = &UnmonitorInstancesInput{}
     +	}
     +
     +	// NOTE(review): output is still nil at this call; it is attached to the
     +	// request via req.Data below — standard generated-code ordering, left as-is.
     +	req = c.newRequest(op, input, output)
     +	output = &UnmonitorInstancesOutput{}
     +	req.Data = output
     +	return
     +}
     +
     +// UnmonitorInstances API operation for Amazon Elastic Compute Cloud.
     +//
     +// Disables monitoring for a running instance. For more information about monitoring
     +// instances, see Monitoring Your Instances and Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html)
     +// in the Amazon Elastic Compute Cloud User Guide.
     +//
     +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
     +// with awserr.Error's Code and Message methods to get detailed information about
     +// the error.
     +//
     +// See the AWS API reference guide for Amazon Elastic Compute Cloud's
     +// API operation UnmonitorInstances for usage and error information.
     +func (c *EC2) UnmonitorInstances(input *UnmonitorInstancesInput) (*UnmonitorInstancesOutput, error) {
     +	req, out := c.UnmonitorInstancesRequest(input)
     +	err := req.Send()
     +	return out, err
     +}
    +
     +// Contains the parameters for accepting the quote.
     +type AcceptReservedInstancesExchangeQuoteInput struct {
     +	_ struct{} `type:"structure"`
     +
     +	// Checks whether you have the required permissions for the action, without
     +	// actually making the request, and provides an error response. If you have
     +	// the required permissions, the error response is DryRunOperation. Otherwise,
     +	// it is UnauthorizedOperation.
     +	DryRun *bool `type:"boolean"`
     +
     +	// The IDs of the Convertible Reserved Instances that you want to exchange for
     +	// other Convertible Reserved Instances of the same or higher value.
     +	//
     +	// ReservedInstanceIds is a required field
     +	ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"`
     +
     +	// The configurations of the Convertible Reserved Instance offerings you are
     +	// purchasing in this exchange.
     +	TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"`
     +}
     +
     +// String returns the string representation
     +func (s AcceptReservedInstancesExchangeQuoteInput) String() string {
     +	return awsutil.Prettify(s)
     +}
     +
     +// GoString returns the string representation
     +func (s AcceptReservedInstancesExchangeQuoteInput) GoString() string {
     +	return s.String()
     +}
     +
     +// Validate inspects the fields of the type to determine if they are valid.
     +// It performs client-side checks only; no API call is made here.
     +func (s *AcceptReservedInstancesExchangeQuoteInput) Validate() error {
     +	invalidParams := request.ErrInvalidParams{Context: "AcceptReservedInstancesExchangeQuoteInput"}
     +	if s.ReservedInstanceIds == nil {
     +		invalidParams.Add(request.NewErrParamRequired("ReservedInstanceIds"))
     +	}
     +	// Recursively validate nested target configurations; nil entries are skipped.
     +	if s.TargetConfigurations != nil {
     +		for i, v := range s.TargetConfigurations {
     +			if v == nil {
     +				continue
     +			}
     +			if err := v.Validate(); err != nil {
     +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetConfigurations", i), err.(request.ErrInvalidParams))
     +			}
     +		}
     +	}
     +
     +	if invalidParams.Len() > 0 {
     +		return invalidParams
     +	}
     +	return nil
     +}
     +
     +// SetDryRun sets the DryRun field's value.
     +func (s *AcceptReservedInstancesExchangeQuoteInput) SetDryRun(v bool) *AcceptReservedInstancesExchangeQuoteInput {
     +	s.DryRun = &v
     +	return s
     +}
     +
     +// SetReservedInstanceIds sets the ReservedInstanceIds field's value.
     +func (s *AcceptReservedInstancesExchangeQuoteInput) SetReservedInstanceIds(v []*string) *AcceptReservedInstancesExchangeQuoteInput {
     +	s.ReservedInstanceIds = v
     +	return s
     +}
     +
     +// SetTargetConfigurations sets the TargetConfigurations field's value.
     +func (s *AcceptReservedInstancesExchangeQuoteInput) SetTargetConfigurations(v []*TargetConfigurationRequest) *AcceptReservedInstancesExchangeQuoteInput {
     +	s.TargetConfigurations = v
     +	return s
     +}
    +
     +// The result of the exchange and whether it was successful.
     +type AcceptReservedInstancesExchangeQuoteOutput struct {
     +	_ struct{} `type:"structure"`
     +
     +	// The ID of the successful exchange.
     +	ExchangeId *string `locationName:"exchangeId" type:"string"`
     +}
     +
     +// String returns the string representation
     +func (s AcceptReservedInstancesExchangeQuoteOutput) String() string {
     +	return awsutil.Prettify(s)
     +}
     +
     +// GoString returns the string representation
     +func (s AcceptReservedInstancesExchangeQuoteOutput) GoString() string {
     +	return s.String()
     +}
     +
     +// SetExchangeId sets the ExchangeId field's value.
     +// Note the setter takes a value and stores its address (generated pattern).
     +func (s *AcceptReservedInstancesExchangeQuoteOutput) SetExchangeId(v string) *AcceptReservedInstancesExchangeQuoteOutput {
     +	s.ExchangeId = &v
     +	return s
     +}
    +
     +// Contains the parameters for AcceptVpcPeeringConnection.
     +type AcceptVpcPeeringConnectionInput struct {
     +	_ struct{} `type:"structure"`
     +
     +	// Checks whether you have the required permissions for the action, without
     +	// actually making the request, and provides an error response. If you have
     +	// the required permissions, the error response is DryRunOperation. Otherwise,
     +	// it is UnauthorizedOperation.
     +	DryRun *bool `locationName:"dryRun" type:"boolean"`
     +
     +	// The ID of the VPC peering connection.
     +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
     +}
     +
     +// String returns the string representation
     +func (s AcceptVpcPeeringConnectionInput) String() string {
     +	return awsutil.Prettify(s)
     +}
     +
     +// GoString returns the string representation
     +func (s AcceptVpcPeeringConnectionInput) GoString() string {
     +	return s.String()
     +}
     +
     +// SetDryRun sets the DryRun field's value.
     +// Note the setter takes a value and stores its address (generated pattern).
     +func (s *AcceptVpcPeeringConnectionInput) SetDryRun(v bool) *AcceptVpcPeeringConnectionInput {
     +	s.DryRun = &v
     +	return s
     +}
     +
     +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
     +func (s *AcceptVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *AcceptVpcPeeringConnectionInput {
     +	s.VpcPeeringConnectionId = &v
     +	return s
     +}
    +
     +// Contains the output of AcceptVpcPeeringConnection.
     +type AcceptVpcPeeringConnectionOutput struct {
     +	_ struct{} `type:"structure"`
     +
     +	// Information about the VPC peering connection.
     +	VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"`
     +}
     +
     +// String returns the string representation
     +func (s AcceptVpcPeeringConnectionOutput) String() string {
     +	return awsutil.Prettify(s)
     +}
     +
     +// GoString returns the string representation
     +func (s AcceptVpcPeeringConnectionOutput) GoString() string {
     +	return s.String()
     +}
     +
     +// SetVpcPeeringConnection sets the VpcPeeringConnection field's value.
     +// The pointer is stored as-is; no defensive copy is made (generated pattern).
     +func (s *AcceptVpcPeeringConnectionOutput) SetVpcPeeringConnection(v *VpcPeeringConnection) *AcceptVpcPeeringConnectionOutput {
     +	s.VpcPeeringConnection = v
     +	return s
     +}
    +
    +// Describes an account attribute.
    +type AccountAttribute struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of the account attribute.
    +	AttributeName *string `locationName:"attributeName" type:"string"`
    +
    +	// One or more values for the account attribute.
    +	AttributeValues []*AccountAttributeValue `locationName:"attributeValueSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s AccountAttribute) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AccountAttribute) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttributeName sets the AttributeName field's value.
    +func (s *AccountAttribute) SetAttributeName(v string) *AccountAttribute {
    +	s.AttributeName = &v
    +	return s
    +}
    +
    +// SetAttributeValues sets the AttributeValues field's value.
    +func (s *AccountAttribute) SetAttributeValues(v []*AccountAttributeValue) *AccountAttribute {
    +	s.AttributeValues = v
    +	return s
    +}
    +
    +// Describes a value of an account attribute.
    +type AccountAttributeValue struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The value of the attribute.
    +	AttributeValue *string `locationName:"attributeValue" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AccountAttributeValue) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AccountAttributeValue) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttributeValue sets the AttributeValue field's value.
    +func (s *AccountAttributeValue) SetAttributeValue(v string) *AccountAttributeValue {
    +	s.AttributeValue = &v
    +	return s
    +}
    +
    +// Describes a running instance in a Spot fleet.
    +type ActiveInstance struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The instance type.
    +	InstanceType *string `locationName:"instanceType" type:"string"`
    +
    +	// The ID of the Spot instance request.
    +	SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ActiveInstance) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ActiveInstance) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *ActiveInstance) SetInstanceId(v string) *ActiveInstance {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ActiveInstance) SetInstanceType(v string) *ActiveInstance {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value.
    +func (s *ActiveInstance) SetSpotInstanceRequestId(v string) *ActiveInstance {
    +	s.SpotInstanceRequestId = &v
    +	return s
    +}
    +
    +// Describes an Elastic IP address.
    +type Address struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID representing the allocation of the address for use with EC2-VPC.
    +	AllocationId *string `locationName:"allocationId" type:"string"`
    +
    +	// The ID representing the association of the address with an instance in a
    +	// VPC.
    +	AssociationId *string `locationName:"associationId" type:"string"`
    +
    +	// Indicates whether this Elastic IP address is for use with instances in EC2-Classic
    +	// (standard) or instances in a VPC (vpc).
    +	Domain *string `locationName:"domain" type:"string" enum:"DomainType"`
    +
    +	// The ID of the instance that the address is associated with (if any).
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The ID of the network interface.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// The ID of the AWS account that owns the network interface.
    +	NetworkInterfaceOwnerId *string `locationName:"networkInterfaceOwnerId" type:"string"`
    +
    +	// The private IP address associated with the Elastic IP address.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// The Elastic IP address.
    +	PublicIp *string `locationName:"publicIp" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Address) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Address) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *Address) SetAllocationId(v string) *Address {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *Address) SetAssociationId(v string) *Address {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// SetDomain sets the Domain field's value.
    +func (s *Address) SetDomain(v string) *Address {
    +	s.Domain = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *Address) SetInstanceId(v string) *Address {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *Address) SetNetworkInterfaceId(v string) *Address {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceOwnerId sets the NetworkInterfaceOwnerId field's value.
    +func (s *Address) SetNetworkInterfaceOwnerId(v string) *Address {
    +	s.NetworkInterfaceOwnerId = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *Address) SetPrivateIpAddress(v string) *Address {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *Address) SetPublicIp(v string) *Address {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Contains the parameters for AllocateAddress.
    +type AllocateAddressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Set to vpc to allocate the address for use with instances in a VPC.
    +	//
    +	// Default: The address is for use with instances in EC2-Classic.
    +	Domain *string `type:"string" enum:"DomainType"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s AllocateAddressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AllocateAddressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDomain sets the Domain field's value.
    +func (s *AllocateAddressInput) SetDomain(v string) *AllocateAddressInput {
    +	s.Domain = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AllocateAddressInput) SetDryRun(v bool) *AllocateAddressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// Contains the output of AllocateAddress.
    +type AllocateAddressOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic
    +	// IP address for use with instances in a VPC.
    +	AllocationId *string `locationName:"allocationId" type:"string"`
    +
    +	// Indicates whether this Elastic IP address is for use with instances in EC2-Classic
    +	// (standard) or instances in a VPC (vpc).
    +	Domain *string `locationName:"domain" type:"string" enum:"DomainType"`
    +
    +	// The Elastic IP address.
    +	PublicIp *string `locationName:"publicIp" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AllocateAddressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AllocateAddressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *AllocateAddressOutput) SetAllocationId(v string) *AllocateAddressOutput {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetDomain sets the Domain field's value.
    +func (s *AllocateAddressOutput) SetDomain(v string) *AllocateAddressOutput {
    +	s.Domain = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *AllocateAddressOutput) SetPublicIp(v string) *AllocateAddressOutput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Contains the parameters for AllocateHosts.
    +type AllocateHostsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// This is enabled by default. This property allows instances to be automatically
    +	// placed onto available Dedicated Hosts, when you are launching instances without
    +	// specifying a host ID.
    +	//
    +	// Default: Enabled
    +	AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"`
    +
    +	// The Availability Zone for the Dedicated Hosts.
    +	//
    +	// AvailabilityZone is a required field
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure idempotency of the
    +	// request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// Specify the instance type that you want your Dedicated Hosts to be configured
    +	// for. When you specify the instance type, that is the only instance type that
    +	// you can launch onto that host.
    +	//
    +	// InstanceType is a required field
    +	InstanceType *string `locationName:"instanceType" type:"string" required:"true"`
    +
    +	// The number of Dedicated Hosts you want to allocate to your account with these
    +	// parameters.
    +	//
    +	// Quantity is a required field
    +	Quantity *int64 `locationName:"quantity" type:"integer" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AllocateHostsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AllocateHostsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AllocateHostsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AllocateHostsInput"}
    +	if s.AvailabilityZone == nil {
    +		invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
    +	}
    +	if s.InstanceType == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceType"))
    +	}
    +	if s.Quantity == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Quantity"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAutoPlacement sets the AutoPlacement field's value.
    +func (s *AllocateHostsInput) SetAutoPlacement(v string) *AllocateHostsInput {
    +	s.AutoPlacement = &v
    +	return s
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *AllocateHostsInput) SetAvailabilityZone(v string) *AllocateHostsInput {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *AllocateHostsInput) SetClientToken(v string) *AllocateHostsInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *AllocateHostsInput) SetInstanceType(v string) *AllocateHostsInput {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetQuantity sets the Quantity field's value.
    +func (s *AllocateHostsInput) SetQuantity(v int64) *AllocateHostsInput {
    +	s.Quantity = &v
    +	return s
    +}
    +
    +// Contains the output of AllocateHosts.
    +type AllocateHostsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the allocated Dedicated Host. This is used when you want to launch
    +	// an instance onto a specific host.
    +	HostIds []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s AllocateHostsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AllocateHostsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetHostIds sets the HostIds field's value.
    +func (s *AllocateHostsOutput) SetHostIds(v []*string) *AllocateHostsOutput {
    +	s.HostIds = v
    +	return s
    +}
    +
    +// Contains the parameters for AssignPrivateIpAddresses.
    +type AssignPrivateIpAddressesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether to allow an IP address that is already assigned to another
    +	// network interface or instance to be reassigned to the specified network interface.
    +	AllowReassignment *bool `locationName:"allowReassignment" type:"boolean"`
    +
    +	// The ID of the network interface.
    +	//
    +	// NetworkInterfaceId is a required field
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
    +
    +	// One or more IP addresses to be assigned as a secondary private IP address
    +	// to the network interface. You can't specify this parameter when also specifying
    +	// a number of secondary IP addresses.
    +	//
    +	// If you don't specify an IP address, Amazon EC2 automatically selects an IP
    +	// address within the subnet range.
    +	PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list"`
    +
    +	// The number of secondary IP addresses to assign to the network interface.
    +	// You can't specify this parameter when also specifying private IP addresses.
    +	SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s AssignPrivateIpAddressesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssignPrivateIpAddressesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AssignPrivateIpAddressesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AssignPrivateIpAddressesInput"}
    +	if s.NetworkInterfaceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAllowReassignment sets the AllowReassignment field's value.
    +func (s *AssignPrivateIpAddressesInput) SetAllowReassignment(v bool) *AssignPrivateIpAddressesInput {
    +	s.AllowReassignment = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *AssignPrivateIpAddressesInput) SetNetworkInterfaceId(v string) *AssignPrivateIpAddressesInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
    +func (s *AssignPrivateIpAddressesInput) SetPrivateIpAddresses(v []*string) *AssignPrivateIpAddressesInput {
    +	s.PrivateIpAddresses = v
    +	return s
    +}
    +
    +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value.
    +func (s *AssignPrivateIpAddressesInput) SetSecondaryPrivateIpAddressCount(v int64) *AssignPrivateIpAddressesInput {
    +	s.SecondaryPrivateIpAddressCount = &v
    +	return s
    +}
    +
    +type AssignPrivateIpAddressesOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s AssignPrivateIpAddressesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssignPrivateIpAddressesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for AssociateAddress.
    +type AssociateAddressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// [EC2-VPC] The allocation ID. This is required for EC2-VPC.
    +	AllocationId *string `type:"string"`
    +
    +	// [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic
    +	// IP address that is already associated with an instance or network interface
    +	// to be reassociated with the specified instance or network interface. Otherwise,
    +	// the operation fails. In a VPC in an EC2-VPC-only account, reassociation is
    +	// automatic, therefore you can specify false to ensure the operation fails
    +	// if the Elastic IP address is already associated with another resource.
    +	AllowReassociation *bool `locationName:"allowReassociation" type:"boolean"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you
    +	// can specify either the instance ID or the network interface ID, but not both.
    +	// The operation fails if you specify an instance ID unless exactly one network
    +	// interface is attached.
    +	InstanceId *string `type:"string"`
    +
    +	// [EC2-VPC] The ID of the network interface. If the instance has more than
    +	// one network interface, you must specify a network interface ID.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// [EC2-VPC] The primary or secondary private IP address to associate with the
    +	// Elastic IP address. If no private IP address is specified, the Elastic IP
    +	// address is associated with the primary private IP address.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// The Elastic IP address. This is required for EC2-Classic.
    +	PublicIp *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AssociateAddressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssociateAddressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *AssociateAddressInput) SetAllocationId(v string) *AssociateAddressInput {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetAllowReassociation sets the AllowReassociation field's value.
    +func (s *AssociateAddressInput) SetAllowReassociation(v bool) *AssociateAddressInput {
    +	s.AllowReassociation = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AssociateAddressInput) SetDryRun(v bool) *AssociateAddressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *AssociateAddressInput) SetInstanceId(v string) *AssociateAddressInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *AssociateAddressInput) SetNetworkInterfaceId(v string) *AssociateAddressInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *AssociateAddressInput) SetPrivateIpAddress(v string) *AssociateAddressInput {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *AssociateAddressInput) SetPublicIp(v string) *AssociateAddressInput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Contains the output of AssociateAddress.
    +type AssociateAddressOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// [EC2-VPC] The ID that represents the association of the Elastic IP address
    +	// with an instance.
    +	AssociationId *string `locationName:"associationId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AssociateAddressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssociateAddressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *AssociateAddressOutput) SetAssociationId(v string) *AssociateAddressOutput {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// Contains the parameters for AssociateDhcpOptions.
    +type AssociateDhcpOptionsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the DHCP options set, or default to associate no DHCP options with
    +	// the VPC.
    +	//
    +	// DhcpOptionsId is a required field
    +	DhcpOptionsId *string `type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AssociateDhcpOptionsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssociateDhcpOptionsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AssociateDhcpOptionsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AssociateDhcpOptionsInput"}
    +	if s.DhcpOptionsId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DhcpOptionsId"))
    +	}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDhcpOptionsId sets the DhcpOptionsId field's value.
    +func (s *AssociateDhcpOptionsInput) SetDhcpOptionsId(v string) *AssociateDhcpOptionsInput {
    +	s.DhcpOptionsId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AssociateDhcpOptionsInput) SetDryRun(v bool) *AssociateDhcpOptionsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *AssociateDhcpOptionsInput) SetVpcId(v string) *AssociateDhcpOptionsInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +type AssociateDhcpOptionsOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s AssociateDhcpOptionsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssociateDhcpOptionsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for AssociateRouteTable.
    +type AssociateRouteTableInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the route table.
    +	//
    +	// RouteTableId is a required field
    +	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
    +
    +	// The ID of the subnet.
    +	//
    +	// SubnetId is a required field
    +	SubnetId *string `locationName:"subnetId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AssociateRouteTableInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssociateRouteTableInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AssociateRouteTableInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AssociateRouteTableInput"}
    +	if s.RouteTableId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
    +	}
    +	if s.SubnetId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AssociateRouteTableInput) SetDryRun(v bool) *AssociateRouteTableInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *AssociateRouteTableInput) SetRouteTableId(v string) *AssociateRouteTableInput {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *AssociateRouteTableInput) SetSubnetId(v string) *AssociateRouteTableInput {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// Contains the output of AssociateRouteTable.
    +type AssociateRouteTableOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The route table association ID (needed to disassociate the route table).
    +	AssociationId *string `locationName:"associationId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AssociateRouteTableOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssociateRouteTableOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *AssociateRouteTableOutput) SetAssociationId(v string) *AssociateRouteTableOutput {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// Contains the parameters for AttachClassicLinkVpc.
    +type AttachClassicLinkVpcInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of one or more of the VPC's security groups. You cannot specify security
    +	// groups from a different VPC.
    +	//
    +	// Groups is a required field
    +	Groups []*string `locationName:"SecurityGroupId" locationNameList:"groupId" type:"list" required:"true"`
    +
    +	// The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +
    +	// The ID of a ClassicLink-enabled VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AttachClassicLinkVpcInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachClassicLinkVpcInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AttachClassicLinkVpcInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AttachClassicLinkVpcInput"}
    +	if s.Groups == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Groups"))
    +	}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AttachClassicLinkVpcInput) SetDryRun(v bool) *AttachClassicLinkVpcInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *AttachClassicLinkVpcInput) SetGroups(v []*string) *AttachClassicLinkVpcInput {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *AttachClassicLinkVpcInput) SetInstanceId(v string) *AttachClassicLinkVpcInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *AttachClassicLinkVpcInput) SetVpcId(v string) *AttachClassicLinkVpcInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Contains the output of AttachClassicLinkVpc.
    +type AttachClassicLinkVpcOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Returns true if the request succeeds; otherwise, it returns an error.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s AttachClassicLinkVpcOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachClassicLinkVpcOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *AttachClassicLinkVpcOutput) SetReturn(v bool) *AttachClassicLinkVpcOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for AttachInternetGateway.
    +type AttachInternetGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the Internet gateway.
    +	//
    +	// InternetGatewayId is a required field
    +	InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AttachInternetGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachInternetGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AttachInternetGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AttachInternetGatewayInput"}
    +	if s.InternetGatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InternetGatewayId"))
    +	}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AttachInternetGatewayInput) SetDryRun(v bool) *AttachInternetGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInternetGatewayId sets the InternetGatewayId field's value.
    +func (s *AttachInternetGatewayInput) SetInternetGatewayId(v string) *AttachInternetGatewayInput {
    +	s.InternetGatewayId = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *AttachInternetGatewayInput) SetVpcId(v string) *AttachInternetGatewayInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +type AttachInternetGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s AttachInternetGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachInternetGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for AttachNetworkInterface.
    +type AttachNetworkInterfaceInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The index of the device for the network interface attachment.
    +	//
    +	// DeviceIndex is a required field
    +	DeviceIndex *int64 `locationName:"deviceIndex" type:"integer" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +
    +	// The ID of the network interface.
    +	//
    +	// NetworkInterfaceId is a required field
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AttachNetworkInterfaceInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachNetworkInterfaceInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AttachNetworkInterfaceInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AttachNetworkInterfaceInput"}
    +	if s.DeviceIndex == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DeviceIndex"))
    +	}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +	if s.NetworkInterfaceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDeviceIndex sets the DeviceIndex field's value.
    +func (s *AttachNetworkInterfaceInput) SetDeviceIndex(v int64) *AttachNetworkInterfaceInput {
    +	s.DeviceIndex = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AttachNetworkInterfaceInput) SetDryRun(v bool) *AttachNetworkInterfaceInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *AttachNetworkInterfaceInput) SetInstanceId(v string) *AttachNetworkInterfaceInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *AttachNetworkInterfaceInput) SetNetworkInterfaceId(v string) *AttachNetworkInterfaceInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// Contains the output of AttachNetworkInterface.
    +type AttachNetworkInterfaceOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the network interface attachment.
    +	AttachmentId *string `locationName:"attachmentId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AttachNetworkInterfaceOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachNetworkInterfaceOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachmentId sets the AttachmentId field's value.
    +func (s *AttachNetworkInterfaceOutput) SetAttachmentId(v string) *AttachNetworkInterfaceOutput {
    +	s.AttachmentId = &v
    +	return s
    +}
    +
    +// Contains the parameters for AttachVolume.
    +type AttachVolumeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The device name to expose to the instance (for example, /dev/sdh or xvdh).
    +	//
    +	// Device is a required field
    +	Device *string `type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `type:"string" required:"true"`
    +
    +	// The ID of the EBS volume. The volume and instance must be within the same
    +	// Availability Zone.
    +	//
    +	// VolumeId is a required field
    +	VolumeId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AttachVolumeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachVolumeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AttachVolumeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AttachVolumeInput"}
    +	if s.Device == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Device"))
    +	}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +	if s.VolumeId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VolumeId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDevice sets the Device field's value.
    +func (s *AttachVolumeInput) SetDevice(v string) *AttachVolumeInput {
    +	s.Device = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AttachVolumeInput) SetDryRun(v bool) *AttachVolumeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *AttachVolumeInput) SetInstanceId(v string) *AttachVolumeInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *AttachVolumeInput) SetVolumeId(v string) *AttachVolumeInput {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// Contains the parameters for AttachVpnGateway.
    +type AttachVpnGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `type:"string" required:"true"`
    +
    +	// The ID of the virtual private gateway.
    +	//
    +	// VpnGatewayId is a required field
    +	VpnGatewayId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s AttachVpnGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachVpnGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AttachVpnGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AttachVpnGatewayInput"}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +	if s.VpnGatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpnGatewayId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AttachVpnGatewayInput) SetDryRun(v bool) *AttachVpnGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *AttachVpnGatewayInput) SetVpcId(v string) *AttachVpnGatewayInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// SetVpnGatewayId sets the VpnGatewayId field's value.
    +func (s *AttachVpnGatewayInput) SetVpnGatewayId(v string) *AttachVpnGatewayInput {
    +	s.VpnGatewayId = &v
    +	return s
    +}
    +
    +// Contains the output of AttachVpnGateway.
    +type AttachVpnGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the attachment.
    +	VpcAttachment *VpcAttachment `locationName:"attachment" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s AttachVpnGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttachVpnGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetVpcAttachment sets the VpcAttachment field's value.
    +func (s *AttachVpnGatewayOutput) SetVpcAttachment(v *VpcAttachment) *AttachVpnGatewayOutput {
    +	s.VpcAttachment = v
    +	return s
    +}
    +
    +// Describes a value for a resource attribute that is a Boolean value.
    +type AttributeBooleanValue struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attribute value. The valid values are true or false.
    +	Value *bool `locationName:"value" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s AttributeBooleanValue) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttributeBooleanValue) GoString() string {
    +	return s.String()
    +}
    +
    +// SetValue sets the Value field's value.
    +func (s *AttributeBooleanValue) SetValue(v bool) *AttributeBooleanValue {
    +	s.Value = &v
    +	return s
    +}
    +
    +// Describes a value for a resource attribute that is a String.
    +type AttributeValue struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attribute value. Note that the value is case-sensitive.
    +	Value *string `locationName:"value" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AttributeValue) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AttributeValue) GoString() string {
    +	return s.String()
    +}
    +
    +// SetValue sets the Value field's value.
    +func (s *AttributeValue) SetValue(v string) *AttributeValue {
    +	s.Value = &v
    +	return s
    +}
    +
    +// Contains the parameters for AuthorizeSecurityGroupEgress.
    +type AuthorizeSecurityGroupEgressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR IP address range. We recommend that you specify the CIDR range in
    +	// a set of IP permissions instead.
    +	CidrIp *string `locationName:"cidrIp" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The start of port range for the TCP and UDP protocols, or an ICMP type number.
    +	// We recommend that you specify the port range in a set of IP permissions instead.
    +	FromPort *int64 `locationName:"fromPort" type:"integer"`
    +
    +	// The ID of the security group.
    +	//
    +	// GroupId is a required field
    +	GroupId *string `locationName:"groupId" type:"string" required:"true"`
    +
    +	// A set of IP permissions. You can't specify a destination security group and
    +	// a CIDR IP address range.
    +	IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"`
    +
    +	// The IP protocol name or number. We recommend that you specify the protocol
    +	// in a set of IP permissions instead.
    +	IpProtocol *string `locationName:"ipProtocol" type:"string"`
    +
    +	// The name of a destination security group. To authorize outbound access to
    +	// a destination security group, we recommend that you use a set of IP permissions
    +	// instead.
    +	SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"`
    +
    +	// The AWS account number for a destination security group. To authorize outbound
    +	// access to a destination security group, we recommend that you use a set of
    +	// IP permissions instead.
    +	SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"`
    +
    +	// The end of port range for the TCP and UDP protocols, or an ICMP type number.
    +	// We recommend that you specify the port range in a set of IP permissions instead.
    +	ToPort *int64 `locationName:"toPort" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s AuthorizeSecurityGroupEgressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AuthorizeSecurityGroupEgressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AuthorizeSecurityGroupEgressInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AuthorizeSecurityGroupEgressInput"}
    +	if s.GroupId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("GroupId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetCidrIp sets the CidrIp field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetCidrIp(v string) *AuthorizeSecurityGroupEgressInput {
    +	s.CidrIp = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetDryRun(v bool) *AuthorizeSecurityGroupEgressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFromPort sets the FromPort field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetFromPort(v int64) *AuthorizeSecurityGroupEgressInput {
    +	s.FromPort = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetGroupId(v string) *AuthorizeSecurityGroupEgressInput {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetIpPermissions sets the IpPermissions field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetIpPermissions(v []*IpPermission) *AuthorizeSecurityGroupEgressInput {
    +	s.IpPermissions = v
    +	return s
    +}
    +
    +// SetIpProtocol sets the IpProtocol field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetIpProtocol(v string) *AuthorizeSecurityGroupEgressInput {
    +	s.IpProtocol = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetSourceSecurityGroupName(v string) *AuthorizeSecurityGroupEgressInput {
    +	s.SourceSecurityGroupName = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetSourceSecurityGroupOwnerId(v string) *AuthorizeSecurityGroupEgressInput {
    +	s.SourceSecurityGroupOwnerId = &v
    +	return s
    +}
    +
    +// SetToPort sets the ToPort field's value.
    +func (s *AuthorizeSecurityGroupEgressInput) SetToPort(v int64) *AuthorizeSecurityGroupEgressInput {
    +	s.ToPort = &v
    +	return s
    +}
    +
    +type AuthorizeSecurityGroupEgressOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s AuthorizeSecurityGroupEgressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AuthorizeSecurityGroupEgressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for AuthorizeSecurityGroupIngress.
    +type AuthorizeSecurityGroupIngressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR IP address range. You can't specify this parameter when specifying
    +	// a source security group.
    +	CidrIp *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The start of port range for the TCP and UDP protocols, or an ICMP type number.
    +	// For the ICMP type number, use -1 to specify all ICMP types.
    +	FromPort *int64 `type:"integer"`
    +
    +	// The ID of the security group. Required for a nondefault VPC.
    +	GroupId *string `type:"string"`
    +
    +	// [EC2-Classic, default VPC] The name of the security group.
    +	GroupName *string `type:"string"`
    +
    +	// A set of IP permissions. Can be used to specify multiple rules in a single
    +	// command.
    +	IpPermissions []*IpPermission `locationNameList:"item" type:"list"`
    +
    +	// The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
    +	// (VPC only) Use -1 to specify all traffic. If you specify -1, traffic on all
    +	// ports is allowed, regardless of any ports you specify.
    +	IpProtocol *string `type:"string"`
    +
    +	// [EC2-Classic, default VPC] The name of the source security group. You can't
    +	// specify this parameter in combination with the following parameters: the
    +	// CIDR IP address range, the start of the port range, the IP protocol, and
    +	// the end of the port range. Creates rules that grant full ICMP, UDP, and TCP
    +	// access. To create a rule with a specific IP protocol and port range, use
    +	// a set of IP permissions instead. For EC2-VPC, the source security group must
    +	// be in the same VPC.
    +	SourceSecurityGroupName *string `type:"string"`
    +
    +	// [EC2-Classic] The AWS account number for the source security group, if the
    +	// source security group is in a different account. You can't specify this parameter
    +	// in combination with the following parameters: the CIDR IP address range,
    +	// the IP protocol, the start of the port range, and the end of the port range.
    +	// Creates rules that grant full ICMP, UDP, and TCP access. To create a rule
    +	// with a specific IP protocol and port range, use a set of IP permissions instead.
    +	SourceSecurityGroupOwnerId *string `type:"string"`
    +
    +	// The end of port range for the TCP and UDP protocols, or an ICMP code number.
    +	// For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.
    +	ToPort *int64 `type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s AuthorizeSecurityGroupIngressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AuthorizeSecurityGroupIngressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCidrIp sets the CidrIp field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetCidrIp(v string) *AuthorizeSecurityGroupIngressInput {
    +	s.CidrIp = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetDryRun(v bool) *AuthorizeSecurityGroupIngressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFromPort sets the FromPort field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetFromPort(v int64) *AuthorizeSecurityGroupIngressInput {
    +	s.FromPort = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetGroupId(v string) *AuthorizeSecurityGroupIngressInput {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetGroupName(v string) *AuthorizeSecurityGroupIngressInput {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// SetIpPermissions sets the IpPermissions field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetIpPermissions(v []*IpPermission) *AuthorizeSecurityGroupIngressInput {
    +	s.IpPermissions = v
    +	return s
    +}
    +
    +// SetIpProtocol sets the IpProtocol field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetIpProtocol(v string) *AuthorizeSecurityGroupIngressInput {
    +	s.IpProtocol = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetSourceSecurityGroupName(v string) *AuthorizeSecurityGroupIngressInput {
    +	s.SourceSecurityGroupName = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetSourceSecurityGroupOwnerId(v string) *AuthorizeSecurityGroupIngressInput {
    +	s.SourceSecurityGroupOwnerId = &v
    +	return s
    +}
    +
    +// SetToPort sets the ToPort field's value.
    +func (s *AuthorizeSecurityGroupIngressInput) SetToPort(v int64) *AuthorizeSecurityGroupIngressInput {
    +	s.ToPort = &v
    +	return s
    +}
    +
    +type AuthorizeSecurityGroupIngressOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s AuthorizeSecurityGroupIngressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AuthorizeSecurityGroupIngressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Describes an Availability Zone.
    +type AvailabilityZone struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Any messages about the Availability Zone.
    +	Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"`
    +
    +	// The name of the region.
    +	RegionName *string `locationName:"regionName" type:"string"`
    +
    +	// The state of the Availability Zone.
    +	State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"`
    +
    +	// The name of the Availability Zone.
    +	ZoneName *string `locationName:"zoneName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AvailabilityZone) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AvailabilityZone) GoString() string {
    +	return s.String()
    +}
    +
    +// SetMessages sets the Messages field's value.
    +func (s *AvailabilityZone) SetMessages(v []*AvailabilityZoneMessage) *AvailabilityZone {
    +	s.Messages = v
    +	return s
    +}
    +
    +// SetRegionName sets the RegionName field's value.
    +func (s *AvailabilityZone) SetRegionName(v string) *AvailabilityZone {
    +	s.RegionName = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *AvailabilityZone) SetState(v string) *AvailabilityZone {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetZoneName sets the ZoneName field's value.
    +func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone {
    +	s.ZoneName = &v
    +	return s
    +}
    +
    +// Describes a message about an Availability Zone.
    +type AvailabilityZoneMessage struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The message about the Availability Zone.
    +	Message *string `locationName:"message" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AvailabilityZoneMessage) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AvailabilityZoneMessage) GoString() string {
    +	return s.String()
    +}
    +
    +// SetMessage sets the Message field's value.
    +func (s *AvailabilityZoneMessage) SetMessage(v string) *AvailabilityZoneMessage {
    +	s.Message = &v
    +	return s
    +}
    +
    +// The capacity information for instances launched onto the Dedicated Host.
    +type AvailableCapacity struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The total number of instances that the Dedicated Host supports.
    +	AvailableInstanceCapacity []*InstanceCapacity `locationName:"availableInstanceCapacity" locationNameList:"item" type:"list"`
    +
    +	// The number of vCPUs available on the Dedicated Host.
    +	AvailableVCpus *int64 `locationName:"availableVCpus" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s AvailableCapacity) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AvailableCapacity) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailableInstanceCapacity sets the AvailableInstanceCapacity field's value.
    +func (s *AvailableCapacity) SetAvailableInstanceCapacity(v []*InstanceCapacity) *AvailableCapacity {
    +	s.AvailableInstanceCapacity = v
    +	return s
    +}
    +
    +// SetAvailableVCpus sets the AvailableVCpus field's value.
    +func (s *AvailableCapacity) SetAvailableVCpus(v int64) *AvailableCapacity {
    +	s.AvailableVCpus = &v
    +	return s
    +}
    +
    +type BlobAttributeValue struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Value is automatically base64 encoded/decoded by the SDK.
    +	Value []byte `locationName:"value" type:"blob"`
    +}
    +
    +// String returns the string representation
    +func (s BlobAttributeValue) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s BlobAttributeValue) GoString() string {
    +	return s.String()
    +}
    +
    +// SetValue sets the Value field's value.
    +func (s *BlobAttributeValue) SetValue(v []byte) *BlobAttributeValue {
    +	s.Value = v
    +	return s
    +}
    +
    +// Describes a block device mapping.
    +type BlockDeviceMapping struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The device name exposed to the instance (for example, /dev/sdh or xvdh).
    +	DeviceName *string `locationName:"deviceName" type:"string"`
    +
    +	// Parameters used to automatically set up EBS volumes when the instance is
    +	// launched.
    +	Ebs *EbsBlockDevice `locationName:"ebs" type:"structure"`
    +
    +	// Suppresses the specified device included in the block device mapping of the
    +	// AMI.
    +	NoDevice *string `locationName:"noDevice" type:"string"`
    +
    +	// The virtual device name (ephemeralN). Instance store volumes are numbered
    +	// starting from 0. An instance type with 2 available instance store volumes
    +	// can specify mappings for ephemeral0 and ephemeral1.The number of available
    +	// instance store volumes depends on the instance type. After you connect to
    +	// the instance, you must mount the volume.
    +	//
    +	// Constraints: For M3 instances, you must specify instance store volumes in
    +	// the block device mapping for the instance. When you launch an M3 instance,
    +	// we ignore any instance store volumes specified in the block device mapping
    +	// for the AMI.
    +	VirtualName *string `locationName:"virtualName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s BlockDeviceMapping) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s BlockDeviceMapping) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDeviceName sets the DeviceName field's value.
    +func (s *BlockDeviceMapping) SetDeviceName(v string) *BlockDeviceMapping {
    +	s.DeviceName = &v
    +	return s
    +}
    +
    +// SetEbs sets the Ebs field's value.
    +func (s *BlockDeviceMapping) SetEbs(v *EbsBlockDevice) *BlockDeviceMapping {
    +	s.Ebs = v
    +	return s
    +}
    +
    +// SetNoDevice sets the NoDevice field's value.
    +func (s *BlockDeviceMapping) SetNoDevice(v string) *BlockDeviceMapping {
    +	s.NoDevice = &v
    +	return s
    +}
    +
    +// SetVirtualName sets the VirtualName field's value.
    +func (s *BlockDeviceMapping) SetVirtualName(v string) *BlockDeviceMapping {
    +	s.VirtualName = &v
    +	return s
    +}
    +
    +// Contains the parameters for BundleInstance.
    +type BundleInstanceInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance to bundle.
    +	//
    +	// Type: String
    +	//
    +	// Default: None
    +	//
    +	// Required: Yes
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `type:"string" required:"true"`
    +
    +	// The bucket in which to store the AMI. You can specify a bucket that you already
    +	// own or a new bucket that Amazon EC2 creates on your behalf. If you specify
    +	// a bucket that belongs to someone else, Amazon EC2 returns an error.
    +	//
    +	// Storage is a required field
    +	Storage *Storage `type:"structure" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s BundleInstanceInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s BundleInstanceInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate returns an ErrInvalidParams error naming any unset required fields, or nil if the input is valid.
    +func (s *BundleInstanceInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "BundleInstanceInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +	if s.Storage == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Storage"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns s to allow chaining.
    +func (s *BundleInstanceInput) SetDryRun(v bool) *BundleInstanceInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value and returns s to allow chaining.
    +func (s *BundleInstanceInput) SetInstanceId(v string) *BundleInstanceInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetStorage sets the Storage field's value and returns s to allow chaining.
    +func (s *BundleInstanceInput) SetStorage(v *Storage) *BundleInstanceInput {
    +	s.Storage = v
    +	return s
    +}
    +
    +// Contains the output of BundleInstance.
    +type BundleInstanceOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the bundle task.
    +	BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s BundleInstanceOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s BundleInstanceOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBundleTask sets the BundleTask field's value and returns s to allow chaining.
    +func (s *BundleInstanceOutput) SetBundleTask(v *BundleTask) *BundleInstanceOutput {
    +	s.BundleTask = v
    +	return s
    +}
    +
    +// Describes a bundle task.
    +type BundleTask struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the bundle task.
    +	BundleId *string `locationName:"bundleId" type:"string"`
    +
    +	// If the task fails, a description of the error.
    +	BundleTaskError *BundleTaskError `locationName:"error" type:"structure"`
    +
    +	// The ID of the instance associated with this bundle task.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The level of task completion, as a percent (for example, 20%).
    +	Progress *string `locationName:"progress" type:"string"`
    +
    +	// The time this task started.
    +	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The state of the task.
    +	State *string `locationName:"state" type:"string" enum:"BundleTaskState"`
    +
    +	// The Amazon S3 storage locations.
    +	Storage *Storage `locationName:"storage" type:"structure"`
    +
    +	// The time of the most recent update for the task.
    +	UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s BundleTask) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s BundleTask) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBundleId sets the BundleId field's value and returns s to allow chaining.
    +func (s *BundleTask) SetBundleId(v string) *BundleTask {
    +	s.BundleId = &v
    +	return s
    +}
    +
    +// SetBundleTaskError sets the BundleTaskError field's value and returns s to allow chaining.
    +func (s *BundleTask) SetBundleTaskError(v *BundleTaskError) *BundleTask {
    +	s.BundleTaskError = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value and returns s to allow chaining.
    +func (s *BundleTask) SetInstanceId(v string) *BundleTask {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetProgress sets the Progress field's value and returns s to allow chaining.
    +func (s *BundleTask) SetProgress(v string) *BundleTask {
    +	s.Progress = &v
    +	return s
    +}
    +
    +// SetStartTime sets the StartTime field's value and returns s to allow chaining.
    +func (s *BundleTask) SetStartTime(v time.Time) *BundleTask {
    +	s.StartTime = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value and returns s to allow chaining.
    +func (s *BundleTask) SetState(v string) *BundleTask {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetStorage sets the Storage field's value and returns s to allow chaining.
    +func (s *BundleTask) SetStorage(v *Storage) *BundleTask {
    +	s.Storage = v
    +	return s
    +}
    +
    +// SetUpdateTime sets the UpdateTime field's value and returns s to allow chaining.
    +func (s *BundleTask) SetUpdateTime(v time.Time) *BundleTask {
    +	s.UpdateTime = &v
    +	return s
    +}
    +
    +// Describes an error for BundleInstance.
    +type BundleTaskError struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The error code.
    +	Code *string `locationName:"code" type:"string"`
    +
    +	// The error message.
    +	Message *string `locationName:"message" type:"string"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s BundleTaskError) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s BundleTaskError) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value and returns s to allow chaining.
    +func (s *BundleTaskError) SetCode(v string) *BundleTaskError {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetMessage sets the Message field's value and returns s to allow chaining.
    +func (s *BundleTaskError) SetMessage(v string) *BundleTaskError {
    +	s.Message = &v
    +	return s
    +}
    +
    +// Contains the parameters for CancelBundleTask.
    +type CancelBundleTaskInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the bundle task.
    +	//
    +	// BundleId is a required field
    +	BundleId *string `type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelBundleTaskInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelBundleTaskInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate returns an ErrInvalidParams error naming any unset required fields, or nil if the input is valid.
    +func (s *CancelBundleTaskInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CancelBundleTaskInput"}
    +	if s.BundleId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("BundleId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetBundleId sets the BundleId field's value and returns s to allow chaining.
    +func (s *CancelBundleTaskInput) SetBundleId(v string) *CancelBundleTaskInput {
    +	s.BundleId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns s to allow chaining.
    +func (s *CancelBundleTaskInput) SetDryRun(v bool) *CancelBundleTaskInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// Contains the output of CancelBundleTask.
    +type CancelBundleTaskOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the bundle task.
    +	BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelBundleTaskOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelBundleTaskOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBundleTask sets the BundleTask field's value and returns s to allow chaining.
    +func (s *CancelBundleTaskOutput) SetBundleTask(v *BundleTask) *CancelBundleTaskOutput {
    +	s.BundleTask = v
    +	return s
    +}
    +
    +// Contains the parameters for CancelConversionTask.
    +type CancelConversionTaskInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the conversion task.
    +	//
    +	// ConversionTaskId is a required field
    +	ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The reason for canceling the conversion task.
    +	ReasonMessage *string `locationName:"reasonMessage" type:"string"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelConversionTaskInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelConversionTaskInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate returns an ErrInvalidParams error naming any unset required fields, or nil if the input is valid.
    +func (s *CancelConversionTaskInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CancelConversionTaskInput"}
    +	if s.ConversionTaskId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ConversionTaskId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetConversionTaskId sets the ConversionTaskId field's value and returns s to allow chaining.
    +func (s *CancelConversionTaskInput) SetConversionTaskId(v string) *CancelConversionTaskInput {
    +	s.ConversionTaskId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns s to allow chaining.
    +func (s *CancelConversionTaskInput) SetDryRun(v bool) *CancelConversionTaskInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetReasonMessage sets the ReasonMessage field's value and returns s to allow chaining.
    +func (s *CancelConversionTaskInput) SetReasonMessage(v string) *CancelConversionTaskInput {
    +	s.ReasonMessage = &v
    +	return s
    +}
    +
    +// CancelConversionTaskOutput is the (empty) response for CancelConversionTask.
    +type CancelConversionTaskOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelConversionTaskOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelConversionTaskOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for CancelExportTask.
    +type CancelExportTaskInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the export task. This is the ID returned by CreateInstanceExportTask.
    +	//
    +	// ExportTaskId is a required field
    +	ExportTaskId *string `locationName:"exportTaskId" type:"string" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelExportTaskInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelExportTaskInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate returns an ErrInvalidParams error naming any unset required fields, or nil if the input is valid.
    +func (s *CancelExportTaskInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"}
    +	if s.ExportTaskId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ExportTaskId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetExportTaskId sets the ExportTaskId field's value and returns s to allow chaining.
    +func (s *CancelExportTaskInput) SetExportTaskId(v string) *CancelExportTaskInput {
    +	s.ExportTaskId = &v
    +	return s
    +}
    +
    +// CancelExportTaskOutput is the (empty) response for CancelExportTask.
    +type CancelExportTaskOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelExportTaskOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelExportTaskOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for CancelImportTask.
    +type CancelImportTaskInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The reason for canceling the task.
    +	CancelReason *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// The ID of the import image or import snapshot task to be canceled.
    +	ImportTaskId *string `type:"string"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelImportTaskInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelImportTaskInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCancelReason sets the CancelReason field's value and returns s to allow chaining.
    +func (s *CancelImportTaskInput) SetCancelReason(v string) *CancelImportTaskInput {
    +	s.CancelReason = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns s to allow chaining.
    +func (s *CancelImportTaskInput) SetDryRun(v bool) *CancelImportTaskInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetImportTaskId sets the ImportTaskId field's value and returns s to allow chaining.
    +func (s *CancelImportTaskInput) SetImportTaskId(v string) *CancelImportTaskInput {
    +	s.ImportTaskId = &v
    +	return s
    +}
    +
    +// Contains the output for CancelImportTask.
    +type CancelImportTaskOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the task being canceled.
    +	ImportTaskId *string `locationName:"importTaskId" type:"string"`
    +
    +	// The state of the task before the cancellation request.
    +	PreviousState *string `locationName:"previousState" type:"string"`
    +
    +	// The current state of the task being canceled.
    +	State *string `locationName:"state" type:"string"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelImportTaskOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelImportTaskOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImportTaskId sets the ImportTaskId field's value and returns s to allow chaining.
    +func (s *CancelImportTaskOutput) SetImportTaskId(v string) *CancelImportTaskOutput {
    +	s.ImportTaskId = &v
    +	return s
    +}
    +
    +// SetPreviousState sets the PreviousState field's value and returns s to allow chaining.
    +func (s *CancelImportTaskOutput) SetPreviousState(v string) *CancelImportTaskOutput {
    +	s.PreviousState = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value and returns s to allow chaining.
    +func (s *CancelImportTaskOutput) SetState(v string) *CancelImportTaskOutput {
    +	s.State = &v
    +	return s
    +}
    +
    +// Contains the parameters for CancelReservedInstancesListing.
    +type CancelReservedInstancesListingInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the Reserved Instance listing.
    +	//
    +	// ReservedInstancesListingId is a required field
    +	ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelReservedInstancesListingInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelReservedInstancesListingInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate returns an ErrInvalidParams error naming any unset required fields, or nil if the input is valid.
    +func (s *CancelReservedInstancesListingInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CancelReservedInstancesListingInput"}
    +	if s.ReservedInstancesListingId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ReservedInstancesListingId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetReservedInstancesListingId sets the ReservedInstancesListingId field's value and returns s to allow chaining.
    +func (s *CancelReservedInstancesListingInput) SetReservedInstancesListingId(v string) *CancelReservedInstancesListingInput {
    +	s.ReservedInstancesListingId = &v
    +	return s
    +}
    +
    +// Contains the output of CancelReservedInstancesListing.
    +type CancelReservedInstancesListingOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Reserved Instance listing.
    +	ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelReservedInstancesListingOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelReservedInstancesListingOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservedInstancesListings sets the ReservedInstancesListings field's value and returns s to allow chaining.
    +func (s *CancelReservedInstancesListingOutput) SetReservedInstancesListings(v []*ReservedInstancesListing) *CancelReservedInstancesListingOutput {
    +	s.ReservedInstancesListings = v
    +	return s
    +}
    +
    +// Describes a Spot fleet error.
    +type CancelSpotFleetRequestsError struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The error code.
    +	//
    +	// Code is a required field
    +	Code *string `locationName:"code" type:"string" required:"true" enum:"CancelBatchErrorCode"`
    +
    +	// The description for the error code.
    +	//
    +	// Message is a required field
    +	Message *string `locationName:"message" type:"string" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelSpotFleetRequestsError) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelSpotFleetRequestsError) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsError) SetCode(v string) *CancelSpotFleetRequestsError {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetMessage sets the Message field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsError) SetMessage(v string) *CancelSpotFleetRequestsError {
    +	s.Message = &v
    +	return s
    +}
    +
    +// Describes a Spot fleet request that was not successfully canceled.
    +type CancelSpotFleetRequestsErrorItem struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The error.
    +	//
    +	// Error is a required field
    +	Error *CancelSpotFleetRequestsError `locationName:"error" type:"structure" required:"true"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelSpotFleetRequestsErrorItem) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelSpotFleetRequestsErrorItem) GoString() string {
    +	return s.String()
    +}
    +
    +// SetError sets the Error field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsErrorItem) SetError(v *CancelSpotFleetRequestsError) *CancelSpotFleetRequestsErrorItem {
    +	s.Error = v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsErrorItem) SetSpotFleetRequestId(v string) *CancelSpotFleetRequestsErrorItem {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// Contains the parameters for CancelSpotFleetRequests.
    +type CancelSpotFleetRequestsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The IDs of the Spot fleet requests.
    +	//
    +	// SpotFleetRequestIds is a required field
    +	SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"`
    +
    +	// Indicates whether to terminate instances for a Spot fleet request if it is
    +	// canceled successfully.
    +	//
    +	// TerminateInstances is a required field
    +	TerminateInstances *bool `locationName:"terminateInstances" type:"boolean" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelSpotFleetRequestsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelSpotFleetRequestsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate returns an ErrInvalidParams error naming any unset required fields, or nil if the input is valid.
    +func (s *CancelSpotFleetRequestsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CancelSpotFleetRequestsInput"}
    +	if s.SpotFleetRequestIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestIds"))
    +	}
    +	if s.TerminateInstances == nil {
    +		invalidParams.Add(request.NewErrParamRequired("TerminateInstances"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsInput) SetDryRun(v bool) *CancelSpotFleetRequestsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestIds sets the SpotFleetRequestIds field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsInput) SetSpotFleetRequestIds(v []*string) *CancelSpotFleetRequestsInput {
    +	s.SpotFleetRequestIds = v
    +	return s
    +}
    +
    +// SetTerminateInstances sets the TerminateInstances field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsInput) SetTerminateInstances(v bool) *CancelSpotFleetRequestsInput {
    +	s.TerminateInstances = &v
    +	return s
    +}
    +
    +// Contains the output of CancelSpotFleetRequests.
    +type CancelSpotFleetRequestsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the Spot fleet requests that are successfully canceled.
    +	SuccessfulFleetRequests []*CancelSpotFleetRequestsSuccessItem `locationName:"successfulFleetRequestSet" locationNameList:"item" type:"list"`
    +
    +	// Information about the Spot fleet requests that are not successfully canceled.
    +	UnsuccessfulFleetRequests []*CancelSpotFleetRequestsErrorItem `locationName:"unsuccessfulFleetRequestSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelSpotFleetRequestsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelSpotFleetRequestsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSuccessfulFleetRequests sets the SuccessfulFleetRequests field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsOutput) SetSuccessfulFleetRequests(v []*CancelSpotFleetRequestsSuccessItem) *CancelSpotFleetRequestsOutput {
    +	s.SuccessfulFleetRequests = v
    +	return s
    +}
    +
    +// SetUnsuccessfulFleetRequests sets the UnsuccessfulFleetRequests field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsOutput) SetUnsuccessfulFleetRequests(v []*CancelSpotFleetRequestsErrorItem) *CancelSpotFleetRequestsOutput {
    +	s.UnsuccessfulFleetRequests = v
    +	return s
    +}
    +
    +// Describes a Spot fleet request that was successfully canceled.
    +type CancelSpotFleetRequestsSuccessItem struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The current state of the Spot fleet request.
    +	//
    +	// CurrentSpotFleetRequestState is a required field
    +	CurrentSpotFleetRequestState *string `locationName:"currentSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"`
    +
    +	// The previous state of the Spot fleet request.
    +	//
    +	// PreviousSpotFleetRequestState is a required field
    +	PreviousSpotFleetRequestState *string `locationName:"previousSpotFleetRequestState" type:"string" required:"true" enum:"BatchState"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelSpotFleetRequestsSuccessItem) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelSpotFleetRequestsSuccessItem) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCurrentSpotFleetRequestState sets the CurrentSpotFleetRequestState field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsSuccessItem) SetCurrentSpotFleetRequestState(v string) *CancelSpotFleetRequestsSuccessItem {
    +	s.CurrentSpotFleetRequestState = &v
    +	return s
    +}
    +
    +// SetPreviousSpotFleetRequestState sets the PreviousSpotFleetRequestState field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsSuccessItem) SetPreviousSpotFleetRequestState(v string) *CancelSpotFleetRequestsSuccessItem {
    +	s.PreviousSpotFleetRequestState = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value and returns s to allow chaining.
    +func (s *CancelSpotFleetRequestsSuccessItem) SetSpotFleetRequestId(v string) *CancelSpotFleetRequestsSuccessItem {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// Contains the parameters for CancelSpotInstanceRequests.
    +type CancelSpotInstanceRequestsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more Spot instance request IDs.
    +	//
    +	// SpotInstanceRequestIds is a required field
    +	SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list" required:"true"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelSpotInstanceRequestsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelSpotInstanceRequestsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate returns an ErrInvalidParams error naming any unset required fields, or nil if the input is valid.
    +func (s *CancelSpotInstanceRequestsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CancelSpotInstanceRequestsInput"}
    +	if s.SpotInstanceRequestIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SpotInstanceRequestIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns s to allow chaining.
    +func (s *CancelSpotInstanceRequestsInput) SetDryRun(v bool) *CancelSpotInstanceRequestsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetSpotInstanceRequestIds sets the SpotInstanceRequestIds field's value and returns s to allow chaining.
    +func (s *CancelSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*string) *CancelSpotInstanceRequestsInput {
    +	s.SpotInstanceRequestIds = v
    +	return s
    +}
    +
    +// Contains the output of CancelSpotInstanceRequests.
    +type CancelSpotInstanceRequestsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more Spot instance requests.
    +	CancelledSpotInstanceRequests []*CancelledSpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelSpotInstanceRequestsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelSpotInstanceRequestsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCancelledSpotInstanceRequests sets the CancelledSpotInstanceRequests field's value and returns s to allow chaining.
    +func (s *CancelSpotInstanceRequestsOutput) SetCancelledSpotInstanceRequests(v []*CancelledSpotInstanceRequest) *CancelSpotInstanceRequestsOutput {
    +	s.CancelledSpotInstanceRequests = v
    +	return s
    +}
    +
    +// Describes a request to cancel a Spot instance.
    +type CancelledSpotInstanceRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the Spot instance request.
    +	SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"`
    +
    +	// The state of the Spot instance request.
    +	State *string `locationName:"state" type:"string" enum:"CancelSpotInstanceRequestState"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s CancelledSpotInstanceRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s CancelledSpotInstanceRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value and returns s to allow chaining.
    +func (s *CancelledSpotInstanceRequest) SetSpotInstanceRequestId(v string) *CancelledSpotInstanceRequest {
    +	s.SpotInstanceRequestId = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value and returns s to allow chaining.
    +func (s *CancelledSpotInstanceRequest) SetState(v string) *CancelledSpotInstanceRequest {
    +	s.State = &v
    +	return s
    +}
    +
    +// Describes the ClassicLink DNS support status of a VPC.
    +type ClassicLinkDnsSupport struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether ClassicLink DNS support is enabled for the VPC.
    +	ClassicLinkDnsSupported *bool `locationName:"classicLinkDnsSupported" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns a human-readable representation of the value, built with awsutil.Prettify.
    +func (s ClassicLinkDnsSupport) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation; it defers to String.
    +func (s ClassicLinkDnsSupport) GoString() string {
    +	return s.String()
    +}
    +
    +// SetClassicLinkDnsSupported sets the ClassicLinkDnsSupported field's value and returns s to allow chaining.
    +func (s *ClassicLinkDnsSupport) SetClassicLinkDnsSupported(v bool) *ClassicLinkDnsSupport {
    +	s.ClassicLinkDnsSupported = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value and returns s to allow chaining.
    +func (s *ClassicLinkDnsSupport) SetVpcId(v string) *ClassicLinkDnsSupport {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a linked EC2-Classic instance.
    +type ClassicLinkInstance struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A list of security groups.
    +	Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// Any tags assigned to the instance.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ClassicLinkInstance) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ClassicLinkInstance) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *ClassicLinkInstance) SetGroups(v []*GroupIdentifier) *ClassicLinkInstance {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *ClassicLinkInstance) SetInstanceId(v string) *ClassicLinkInstance {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *ClassicLinkInstance) SetTags(v []*Tag) *ClassicLinkInstance {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *ClassicLinkInstance) SetVpcId(v string) *ClassicLinkInstance {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes the client-specific data.
    +type ClientData struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A user-defined comment about the disk upload.
    +	Comment *string `type:"string"`
    +
    +	// The time that the disk upload ends.
    +	UploadEnd *time.Time `type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The size of the uploaded disk image, in GiB.
    +	UploadSize *float64 `type:"double"`
    +
    +	// The time that the disk upload starts.
    +	UploadStart *time.Time `type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s ClientData) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ClientData) GoString() string {
    +	return s.String()
    +}
    +
    +// SetComment sets the Comment field's value.
    +func (s *ClientData) SetComment(v string) *ClientData {
    +	s.Comment = &v
    +	return s
    +}
    +
    +// SetUploadEnd sets the UploadEnd field's value.
    +func (s *ClientData) SetUploadEnd(v time.Time) *ClientData {
    +	s.UploadEnd = &v
    +	return s
    +}
    +
    +// SetUploadSize sets the UploadSize field's value.
    +func (s *ClientData) SetUploadSize(v float64) *ClientData {
    +	s.UploadSize = &v
    +	return s
    +}
    +
    +// SetUploadStart sets the UploadStart field's value.
    +func (s *ClientData) SetUploadStart(v time.Time) *ClientData {
    +	s.UploadStart = &v
    +	return s
    +}
    +
    +// Contains the parameters for ConfirmProductInstance.
    +type ConfirmProductInstanceInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `type:"string" required:"true"`
    +
    +	// The product code. This must be a product code that you own.
    +	//
    +	// ProductCode is a required field
    +	ProductCode *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ConfirmProductInstanceInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ConfirmProductInstanceInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ConfirmProductInstanceInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ConfirmProductInstanceInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +	if s.ProductCode == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ProductCode"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ConfirmProductInstanceInput) SetDryRun(v bool) *ConfirmProductInstanceInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *ConfirmProductInstanceInput) SetInstanceId(v string) *ConfirmProductInstanceInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetProductCode sets the ProductCode field's value.
    +func (s *ConfirmProductInstanceInput) SetProductCode(v string) *ConfirmProductInstanceInput {
    +	s.ProductCode = &v
    +	return s
    +}
    +
    +// Contains the output of ConfirmProductInstance.
    +type ConfirmProductInstanceOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The AWS account ID of the instance owner. This is only present if the product
    +	// code is attached to the instance.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// The return value of the request. Returns true if the specified product code
    +	// is owned by the requester and associated with the specified instance.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s ConfirmProductInstanceOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ConfirmProductInstanceOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *ConfirmProductInstanceOutput) SetOwnerId(v string) *ConfirmProductInstanceOutput {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *ConfirmProductInstanceOutput) SetReturn(v bool) *ConfirmProductInstanceOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Describes a conversion task.
    +type ConversionTask struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the conversion task.
    +	//
    +	// ConversionTaskId is a required field
    +	ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"`
    +
    +	// The time when the task expires. If the upload isn't complete before the expiration
    +	// time, we automatically cancel the task.
    +	ExpirationTime *string `locationName:"expirationTime" type:"string"`
    +
    +	// If the task is for importing an instance, this contains information about
    +	// the import instance task.
    +	ImportInstance *ImportInstanceTaskDetails `locationName:"importInstance" type:"structure"`
    +
    +	// If the task is for importing a volume, this contains information about the
    +	// import volume task.
    +	ImportVolume *ImportVolumeTaskDetails `locationName:"importVolume" type:"structure"`
    +
    +	// The state of the conversion task.
    +	//
    +	// State is a required field
    +	State *string `locationName:"state" type:"string" required:"true" enum:"ConversionTaskState"`
    +
    +	// The status message related to the conversion task.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +
    +	// Any tags assigned to the task.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s ConversionTask) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ConversionTask) GoString() string {
    +	return s.String()
    +}
    +
    +// SetConversionTaskId sets the ConversionTaskId field's value.
    +func (s *ConversionTask) SetConversionTaskId(v string) *ConversionTask {
    +	s.ConversionTaskId = &v
    +	return s
    +}
    +
    +// SetExpirationTime sets the ExpirationTime field's value.
    +func (s *ConversionTask) SetExpirationTime(v string) *ConversionTask {
    +	s.ExpirationTime = &v
    +	return s
    +}
    +
    +// SetImportInstance sets the ImportInstance field's value.
    +func (s *ConversionTask) SetImportInstance(v *ImportInstanceTaskDetails) *ConversionTask {
    +	s.ImportInstance = v
    +	return s
    +}
    +
    +// SetImportVolume sets the ImportVolume field's value.
    +func (s *ConversionTask) SetImportVolume(v *ImportVolumeTaskDetails) *ConversionTask {
    +	s.ImportVolume = v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *ConversionTask) SetState(v string) *ConversionTask {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetStatusMessage sets the StatusMessage field's value.
    +func (s *ConversionTask) SetStatusMessage(v string) *ConversionTask {
    +	s.StatusMessage = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *ConversionTask) SetTags(v []*Tag) *ConversionTask {
    +	s.Tags = v
    +	return s
    +}
    +
    +// Contains the parameters for CopyImage.
    +type CopyImageInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure idempotency of the
    +	// request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	ClientToken *string `type:"string"`
    +
    +	// A description for the new AMI in the destination region.
    +	Description *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Specifies whether the destination snapshots of the copied image should be
    +	// encrypted. The default CMK for EBS is used unless a non-default AWS Key Management
    +	// Service (AWS KMS) CMK is specified with KmsKeyId. For more information, see
    +	// Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	Encrypted *bool `locationName:"encrypted" type:"boolean"`
    +
    +	// The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when
    +	// encrypting the snapshots of an image during a copy operation. This parameter
    +	// is only required if you want to use a non-default CMK; if this parameter
    +	// is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms
    +	// namespace, followed by the region of the CMK, the AWS account ID of the CMK
    +	// owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
    +	// The specified CMK must exist in the region that the snapshot is being copied
    +	// to. If a KmsKeyId is specified, the Encrypted flag must also be set.
    +	KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
    +
    +	// The name of the new AMI in the destination region.
    +	//
    +	// Name is a required field
    +	Name *string `type:"string" required:"true"`
    +
    +	// The ID of the AMI to copy.
    +	//
    +	// SourceImageId is a required field
    +	SourceImageId *string `type:"string" required:"true"`
    +
    +	// The name of the region that contains the AMI to copy.
    +	//
    +	// SourceRegion is a required field
    +	SourceRegion *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s CopyImageInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CopyImageInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CopyImageInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CopyImageInput"}
    +	if s.Name == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Name"))
    +	}
    +	if s.SourceImageId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SourceImageId"))
    +	}
    +	if s.SourceRegion == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SourceRegion"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *CopyImageInput) SetClientToken(v string) *CopyImageInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *CopyImageInput) SetDescription(v string) *CopyImageInput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CopyImageInput) SetDryRun(v bool) *CopyImageInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEncrypted sets the Encrypted field's value.
    +func (s *CopyImageInput) SetEncrypted(v bool) *CopyImageInput {
    +	s.Encrypted = &v
    +	return s
    +}
    +
    +// SetKmsKeyId sets the KmsKeyId field's value.
    +func (s *CopyImageInput) SetKmsKeyId(v string) *CopyImageInput {
    +	s.KmsKeyId = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *CopyImageInput) SetName(v string) *CopyImageInput {
    +	s.Name = &v
    +	return s
    +}
    +
    +// SetSourceImageId sets the SourceImageId field's value.
    +func (s *CopyImageInput) SetSourceImageId(v string) *CopyImageInput {
    +	s.SourceImageId = &v
    +	return s
    +}
    +
    +// SetSourceRegion sets the SourceRegion field's value.
    +func (s *CopyImageInput) SetSourceRegion(v string) *CopyImageInput {
    +	s.SourceRegion = &v
    +	return s
    +}
    +
    +// Contains the output of CopyImage.
    +type CopyImageOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the new AMI.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s CopyImageOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CopyImageOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *CopyImageOutput) SetImageId(v string) *CopyImageOutput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// Contains the parameters for CopySnapshot.
    +type CopySnapshotInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A description for the EBS snapshot.
    +	Description *string `type:"string"`
    +
    +	// The destination region to use in the PresignedUrl parameter of a snapshot
    +	// copy operation. This parameter is only valid for specifying the destination
    +	// region in a PresignedUrl parameter, where it is required.
    +	//
    +	// CopySnapshot sends the snapshot copy to the regional endpoint that you send
    +	// the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI,
    +	// this is specified with the --region parameter or the default region in your
    +	// AWS configuration file).
    +	DestinationRegion *string `locationName:"destinationRegion" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Specifies whether the destination snapshot should be encrypted. You can encrypt
    +	// a copy of an unencrypted snapshot using this flag, but you cannot use it
    +	// to create an unencrypted copy from an encrypted snapshot. Your default CMK
    +	// for EBS is used unless a non-default AWS Key Management Service (AWS KMS)
    +	// CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption
    +	// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in
    +	// the Amazon Elastic Compute Cloud User Guide.
    +	Encrypted *bool `locationName:"encrypted" type:"boolean"`
    +
    +	// The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when
    +	// creating the snapshot copy. This parameter is only required if you want to
    +	// use a non-default CMK; if this parameter is not specified, the default CMK
    +	// for EBS is used. The ARN contains the arn:aws:kms namespace, followed by
    +	// the region of the CMK, the AWS account ID of the CMK owner, the key namespace,
    +	// and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
    +	// The specified CMK must exist in the region that the snapshot is being copied
    +	// to. If a KmsKeyId is specified, the Encrypted flag must also be set.
    +	KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
    +
    +	// The pre-signed URL that facilitates copying an encrypted snapshot. This parameter
    +	// is only required when copying an encrypted snapshot with the Amazon EC2 Query
    +	// API; it is available as an optional parameter in all other cases. The PresignedUrl
    +	// should use the snapshot source endpoint, the CopySnapshot action, and include
    +	// the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The
    +	// PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots
    +	// are stored in Amazon S3, the signing algorithm for this parameter uses the
    +	// same logic that is described in Authenticating Requests by Using Query Parameters
    +	// (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
    +	// in the Amazon Simple Storage Service API Reference. An invalid or improperly
    +	// signed PresignedUrl will cause the copy operation to fail asynchronously,
    +	// and the snapshot will move to an error state.
    +	PresignedUrl *string `locationName:"presignedUrl" type:"string"`
    +
    +	// The ID of the region that contains the snapshot to be copied.
    +	//
    +	// SourceRegion is a required field
    +	SourceRegion *string `type:"string" required:"true"`
    +
    +	// The ID of the EBS snapshot to copy.
    +	//
    +	// SourceSnapshotId is a required field
    +	SourceSnapshotId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s CopySnapshotInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CopySnapshotInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CopySnapshotInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"}
    +	if s.SourceRegion == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SourceRegion"))
    +	}
    +	if s.SourceSnapshotId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SourceSnapshotId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *CopySnapshotInput) SetDescription(v string) *CopySnapshotInput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDestinationRegion sets the DestinationRegion field's value.
    +func (s *CopySnapshotInput) SetDestinationRegion(v string) *CopySnapshotInput {
    +	s.DestinationRegion = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CopySnapshotInput) SetDryRun(v bool) *CopySnapshotInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEncrypted sets the Encrypted field's value.
    +func (s *CopySnapshotInput) SetEncrypted(v bool) *CopySnapshotInput {
    +	s.Encrypted = &v
    +	return s
    +}
    +
    +// SetKmsKeyId sets the KmsKeyId field's value.
    +func (s *CopySnapshotInput) SetKmsKeyId(v string) *CopySnapshotInput {
    +	s.KmsKeyId = &v
    +	return s
    +}
    +
    +// SetPresignedUrl sets the PresignedUrl field's value.
    +func (s *CopySnapshotInput) SetPresignedUrl(v string) *CopySnapshotInput {
    +	s.PresignedUrl = &v
    +	return s
    +}
    +
    +// SetSourceRegion sets the SourceRegion field's value.
    +func (s *CopySnapshotInput) SetSourceRegion(v string) *CopySnapshotInput {
    +	s.SourceRegion = &v
    +	return s
    +}
    +
    +// SetSourceSnapshotId sets the SourceSnapshotId field's value.
    +func (s *CopySnapshotInput) SetSourceSnapshotId(v string) *CopySnapshotInput {
    +	s.SourceSnapshotId = &v
    +	return s
    +}
    +
    +// Contains the output of CopySnapshot.
    +type CopySnapshotOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the new snapshot.
    +	SnapshotId *string `locationName:"snapshotId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s CopySnapshotOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CopySnapshotOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *CopySnapshotOutput) SetSnapshotId(v string) *CopySnapshotOutput {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// Contains the parameters for CreateCustomerGateway.
    +type CreateCustomerGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// For devices that support BGP, the customer gateway's BGP ASN.
    +	//
    +	// Default: 65000
    +	//
    +	// BgpAsn is a required field
    +	BgpAsn *int64 `type:"integer" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The Internet-routable IP address for the customer gateway's outside interface.
    +	// The address must be static.
    +	//
    +	// PublicIp is a required field
    +	PublicIp *string `locationName:"IpAddress" type:"string" required:"true"`
    +
    +	// The type of VPN connection that this customer gateway supports (ipsec.1).
    +	//
    +	// Type is a required field
    +	Type *string `type:"string" required:"true" enum:"GatewayType"`
    +}
    +
    +// String returns the string representation
    +func (s CreateCustomerGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateCustomerGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateCustomerGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateCustomerGatewayInput"}
    +	if s.BgpAsn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("BgpAsn"))
    +	}
    +	if s.PublicIp == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PublicIp"))
    +	}
    +	if s.Type == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Type"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetBgpAsn sets the BgpAsn field's value.
    +func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayInput {
    +	s.BgpAsn = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CreateCustomerGatewayInput) SetDryRun(v bool) *CreateCustomerGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *CreateCustomerGatewayInput) SetPublicIp(v string) *CreateCustomerGatewayInput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// SetType sets the Type field's value.
    +func (s *CreateCustomerGatewayInput) SetType(v string) *CreateCustomerGatewayInput {
    +	s.Type = &v
    +	return s
    +}
    +
    +// Contains the output of CreateCustomerGateway.
    +type CreateCustomerGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the customer gateway.
    +	CustomerGateway *CustomerGateway `locationName:"customerGateway" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s CreateCustomerGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateCustomerGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCustomerGateway sets the CustomerGateway field's value.
    +func (s *CreateCustomerGatewayOutput) SetCustomerGateway(v *CustomerGateway) *CreateCustomerGatewayOutput {
    +	s.CustomerGateway = v
    +	return s
    +}
    +
    +// Contains the parameters for CreateDhcpOptions.
    +type CreateDhcpOptionsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A DHCP configuration option.
    +	//
    +	// DhcpConfigurations is a required field
    +	DhcpConfigurations []*NewDhcpConfiguration `locationName:"dhcpConfiguration" locationNameList:"item" type:"list" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s CreateDhcpOptionsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateDhcpOptionsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateDhcpOptionsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateDhcpOptionsInput"}
    +	if s.DhcpConfigurations == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DhcpConfigurations"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDhcpConfigurations sets the DhcpConfigurations field's value.
    +func (s *CreateDhcpOptionsInput) SetDhcpConfigurations(v []*NewDhcpConfiguration) *CreateDhcpOptionsInput {
    +	s.DhcpConfigurations = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CreateDhcpOptionsInput) SetDryRun(v bool) *CreateDhcpOptionsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// Contains the output of CreateDhcpOptions.
    +type CreateDhcpOptionsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A set of DHCP options.
    +	DhcpOptions *DhcpOptions `locationName:"dhcpOptions" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s CreateDhcpOptionsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateDhcpOptionsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDhcpOptions sets the DhcpOptions field's value.
    +func (s *CreateDhcpOptionsOutput) SetDhcpOptions(v *DhcpOptions) *CreateDhcpOptionsOutput {
    +	s.DhcpOptions = v
    +	return s
    +}
    +
    +// Contains the parameters for CreateFlowLogs.
    +type CreateFlowLogsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure the idempotency of
    +	// the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
    +	ClientToken *string `type:"string"`
    +
    +	// The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs
    +	// log group.
    +	//
    +	// DeliverLogsPermissionArn is a required field
    +	DeliverLogsPermissionArn *string `type:"string" required:"true"`
    +
    +	// The name of the CloudWatch log group.
    +	//
    +	// LogGroupName is a required field
    +	LogGroupName *string `type:"string" required:"true"`
    +
    +	// One or more subnet, network interface, or VPC IDs.
    +	//
    +	// Constraints: Maximum of 1000 resources
    +	//
    +	// ResourceIds is a required field
    +	ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"`
    +
    +	// The type of resource on which to create the flow log.
    +	//
    +	// ResourceType is a required field
    +	ResourceType *string `type:"string" required:"true" enum:"FlowLogsResourceType"`
    +
    +	// The type of traffic to log.
    +	//
    +	// TrafficType is a required field
    +	TrafficType *string `type:"string" required:"true" enum:"TrafficType"`
    +}
    +
    +// String returns the string representation
    +func (s CreateFlowLogsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateFlowLogsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateFlowLogsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateFlowLogsInput"}
    +	if s.DeliverLogsPermissionArn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DeliverLogsPermissionArn"))
    +	}
    +	if s.LogGroupName == nil {
    +		invalidParams.Add(request.NewErrParamRequired("LogGroupName"))
    +	}
    +	if s.ResourceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ResourceIds"))
    +	}
    +	if s.ResourceType == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ResourceType"))
    +	}
    +	if s.TrafficType == nil {
    +		invalidParams.Add(request.NewErrParamRequired("TrafficType"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *CreateFlowLogsInput) SetClientToken(v string) *CreateFlowLogsInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetDeliverLogsPermissionArn sets the DeliverLogsPermissionArn field's value.
    +func (s *CreateFlowLogsInput) SetDeliverLogsPermissionArn(v string) *CreateFlowLogsInput {
    +	s.DeliverLogsPermissionArn = &v
    +	return s
    +}
    +
    +// SetLogGroupName sets the LogGroupName field's value.
    +func (s *CreateFlowLogsInput) SetLogGroupName(v string) *CreateFlowLogsInput {
    +	s.LogGroupName = &v
    +	return s
    +}
    +
    +// SetResourceIds sets the ResourceIds field's value.
    +func (s *CreateFlowLogsInput) SetResourceIds(v []*string) *CreateFlowLogsInput {
    +	s.ResourceIds = v
    +	return s
    +}
    +
    +// SetResourceType sets the ResourceType field's value.
    +func (s *CreateFlowLogsInput) SetResourceType(v string) *CreateFlowLogsInput {
    +	s.ResourceType = &v
    +	return s
    +}
    +
    +// SetTrafficType sets the TrafficType field's value.
    +func (s *CreateFlowLogsInput) SetTrafficType(v string) *CreateFlowLogsInput {
    +	s.TrafficType = &v
    +	return s
    +}
    +
    +// Contains the output of CreateFlowLogs.
    +type CreateFlowLogsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure the idempotency of
    +	// the request.
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// The IDs of the flow logs.
    +	FlowLogIds []*string `locationName:"flowLogIdSet" locationNameList:"item" type:"list"`
    +
    +	// Information about the flow logs that could not be created successfully.
    +	Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s CreateFlowLogsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateFlowLogsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *CreateFlowLogsOutput) SetClientToken(v string) *CreateFlowLogsOutput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetFlowLogIds sets the FlowLogIds field's value.
    +func (s *CreateFlowLogsOutput) SetFlowLogIds(v []*string) *CreateFlowLogsOutput {
    +	s.FlowLogIds = v
    +	return s
    +}
    +
    +// SetUnsuccessful sets the Unsuccessful field's value.
    +func (s *CreateFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *CreateFlowLogsOutput {
    +	s.Unsuccessful = v
    +	return s
    +}
    +
    +// Contains the parameters for CreateImage.
    +type CreateImageInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more block device mappings.
    +	BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
    +
    +	// A description for the new image.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +
    +	// A name for the new image.
    +	//
    +	// Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets
    +	// ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('),
    +	// at-signs (@), or underscores(_)
    +	//
    +	// Name is a required field
    +	Name *string `locationName:"name" type:"string" required:"true"`
    +
    +	// By default, Amazon EC2 attempts to shut down and reboot the instance before
    +	// creating the image. If the 'No Reboot' option is set, Amazon EC2 doesn't
    +	// shut down the instance before creating the image. When this option is used,
    +	// file system integrity on the created image can't be guaranteed.
    +	NoReboot *bool `locationName:"noReboot" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s CreateImageInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateImageInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateImageInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateImageInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +	if s.Name == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Name"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *CreateImageInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *CreateImageInput {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *CreateImageInput) SetDescription(v string) *CreateImageInput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CreateImageInput) SetDryRun(v bool) *CreateImageInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *CreateImageInput) SetInstanceId(v string) *CreateImageInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *CreateImageInput) SetName(v string) *CreateImageInput {
    +	s.Name = &v
    +	return s
    +}
    +
    +// SetNoReboot sets the NoReboot field's value.
    +func (s *CreateImageInput) SetNoReboot(v bool) *CreateImageInput {
    +	s.NoReboot = &v
    +	return s
    +}
    +
    +// Contains the output of CreateImage.
    +type CreateImageOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the new AMI.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s CreateImageOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateImageOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *CreateImageOutput) SetImageId(v string) *CreateImageOutput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// Contains the parameters for CreateInstanceExportTask.
    +type CreateInstanceExportTaskInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A description for the conversion task or the resource being exported. The
    +	// maximum length is 255 bytes.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The format and location for an instance export task.
    +	ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +
    +	// The target virtualization environment.
    +	TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"`
    +}
    +
    +// String returns the string representation
    +func (s CreateInstanceExportTaskInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateInstanceExportTaskInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateInstanceExportTaskInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateInstanceExportTaskInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *CreateInstanceExportTaskInput) SetDescription(v string) *CreateInstanceExportTaskInput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetExportToS3Task sets the ExportToS3Task field's value.
    +func (s *CreateInstanceExportTaskInput) SetExportToS3Task(v *ExportToS3TaskSpecification) *CreateInstanceExportTaskInput {
    +	s.ExportToS3Task = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *CreateInstanceExportTaskInput) SetInstanceId(v string) *CreateInstanceExportTaskInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetTargetEnvironment sets the TargetEnvironment field's value.
    +func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateInstanceExportTaskInput {
    +	s.TargetEnvironment = &v
    +	return s
    +}
    +
    +// Contains the output for CreateInstanceExportTask.
    +type CreateInstanceExportTaskOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the instance export task.
    +	ExportTask *ExportTask `locationName:"exportTask" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s CreateInstanceExportTaskOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateInstanceExportTaskOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetExportTask sets the ExportTask field's value.
    +func (s *CreateInstanceExportTaskOutput) SetExportTask(v *ExportTask) *CreateInstanceExportTaskOutput {
    +	s.ExportTask = v
    +	return s
    +}
    +
    +// Contains the parameters for CreateInternetGateway.
    +type CreateInternetGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s CreateInternetGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateInternetGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CreateInternetGatewayInput) SetDryRun(v bool) *CreateInternetGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// Contains the output of CreateInternetGateway.
    +type CreateInternetGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the Internet gateway.
    +	InternetGateway *InternetGateway `locationName:"internetGateway" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s CreateInternetGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateInternetGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInternetGateway sets the InternetGateway field's value.
    +func (s *CreateInternetGatewayOutput) SetInternetGateway(v *InternetGateway) *CreateInternetGatewayOutput {
    +	s.InternetGateway = v
    +	return s
    +}
    +
    +// Contains the parameters for CreateKeyPair.
    +type CreateKeyPairInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// A unique name for the key pair.
    +	//
    +	// Constraints: Up to 255 ASCII characters
    +	//
    +	// KeyName is a required field
    +	KeyName *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s CreateKeyPairInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateKeyPairInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateKeyPairInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateKeyPairInput"}
    +	if s.KeyName == nil {
    +		invalidParams.Add(request.NewErrParamRequired("KeyName"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CreateKeyPairInput) SetDryRun(v bool) *CreateKeyPairInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *CreateKeyPairInput) SetKeyName(v string) *CreateKeyPairInput {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// Describes a key pair.
    +type CreateKeyPairOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The SHA-1 digest of the DER encoded private key.
    +	KeyFingerprint *string `locationName:"keyFingerprint" type:"string"`
    +
    +	// An unencrypted PEM encoded RSA private key.
    +	KeyMaterial *string `locationName:"keyMaterial" type:"string"`
    +
    +	// The name of the key pair.
    +	KeyName *string `locationName:"keyName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s CreateKeyPairOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateKeyPairOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetKeyFingerprint sets the KeyFingerprint field's value.
    +func (s *CreateKeyPairOutput) SetKeyFingerprint(v string) *CreateKeyPairOutput {
    +	s.KeyFingerprint = &v
    +	return s
    +}
    +
    +// SetKeyMaterial sets the KeyMaterial field's value.
    +func (s *CreateKeyPairOutput) SetKeyMaterial(v string) *CreateKeyPairOutput {
    +	s.KeyMaterial = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *CreateKeyPairOutput) SetKeyName(v string) *CreateKeyPairOutput {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// Contains the parameters for CreateNatGateway.
    +type CreateNatGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The allocation ID of an Elastic IP address to associate with the NAT gateway.
    +	// If the Elastic IP address is associated with another resource, you must first
    +	// disassociate it.
    +	//
    +	// AllocationId is a required field
    +	AllocationId *string `type:"string" required:"true"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure the idempotency of
    +	// the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	//
    +	// Constraint: Maximum 64 ASCII characters.
    +	ClientToken *string `type:"string"`
    +
    +	// The subnet in which to create the NAT gateway.
    +	//
    +	// SubnetId is a required field
    +	SubnetId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s CreateNatGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateNatGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateNatGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateNatGatewayInput"}
    +	if s.AllocationId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("AllocationId"))
    +	}
    +	if s.SubnetId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *CreateNatGatewayInput) SetAllocationId(v string) *CreateNatGatewayInput {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *CreateNatGatewayInput) SetClientToken(v string) *CreateNatGatewayInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *CreateNatGatewayInput) SetSubnetId(v string) *CreateNatGatewayInput {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// Contains the output of CreateNatGateway.
    +type CreateNatGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier to ensure the idempotency of the request.
    +	// Only returned if a client token was provided in the request.
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// Information about the NAT gateway.
    +	NatGateway *NatGateway `locationName:"natGateway" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s CreateNatGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateNatGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *CreateNatGatewayOutput) SetClientToken(v string) *CreateNatGatewayOutput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetNatGateway sets the NatGateway field's value.
    +func (s *CreateNatGatewayOutput) SetNatGateway(v *NatGateway) *CreateNatGatewayOutput {
    +	s.NatGateway = v
    +	return s
    +}
    +
    +// Contains the parameters for CreateNetworkAclEntry.
    +type CreateNetworkAclEntryInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24).
    +	//
    +	// CidrBlock is a required field
    +	CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Indicates whether this is an egress rule (rule is applied to traffic leaving
    +	// the subnet).
    +	//
    +	// Egress is a required field
    +	Egress *bool `locationName:"egress" type:"boolean" required:"true"`
    +
    +	// ICMP protocol: The ICMP type and code. Required if specifying ICMP for the
    +	// protocol.
    +	IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"`
    +
    +	// The ID of the network ACL.
    +	//
    +	// NetworkAclId is a required field
    +	NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"`
    +
    +	// TCP or UDP protocols: The range of ports the rule applies to.
    +	PortRange *PortRange `locationName:"portRange" type:"structure"`
    +
    +	// The protocol. A value of -1 means all protocols.
    +	//
    +	// Protocol is a required field
    +	Protocol *string `locationName:"protocol" type:"string" required:"true"`
    +
    +	// Indicates whether to allow or deny the traffic that matches the rule.
    +	//
    +	// RuleAction is a required field
    +	RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"`
    +
    +	// The rule number for the entry (for example, 100). ACL entries are processed
    +	// in ascending order by rule number.
    +	//
    +	// Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 is
    +	// reserved for internal use.
    +	//
    +	// RuleNumber is a required field
    +	RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s CreateNetworkAclEntryInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateNetworkAclEntryInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateNetworkAclEntryInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateNetworkAclEntryInput"}
    +	if s.CidrBlock == nil {
    +		invalidParams.Add(request.NewErrParamRequired("CidrBlock"))
    +	}
    +	if s.Egress == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Egress"))
    +	}
    +	if s.NetworkAclId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkAclId"))
    +	}
    +	if s.Protocol == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Protocol"))
    +	}
    +	if s.RuleAction == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RuleAction"))
    +	}
    +	if s.RuleNumber == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RuleNumber"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetCidrBlock sets the CidrBlock field's value.
    +func (s *CreateNetworkAclEntryInput) SetCidrBlock(v string) *CreateNetworkAclEntryInput {
    +	s.CidrBlock = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CreateNetworkAclEntryInput) SetDryRun(v bool) *CreateNetworkAclEntryInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEgress sets the Egress field's value.
    +func (s *CreateNetworkAclEntryInput) SetEgress(v bool) *CreateNetworkAclEntryInput {
    +	s.Egress = &v
    +	return s
    +}
    +
    +// SetIcmpTypeCode sets the IcmpTypeCode field's value.
    +func (s *CreateNetworkAclEntryInput) SetIcmpTypeCode(v *IcmpTypeCode) *CreateNetworkAclEntryInput {
    +	s.IcmpTypeCode = v
    +	return s
    +}
    +
    +// SetNetworkAclId sets the NetworkAclId field's value.
    +func (s *CreateNetworkAclEntryInput) SetNetworkAclId(v string) *CreateNetworkAclEntryInput {
    +	s.NetworkAclId = &v
    +	return s
    +}
    +
    +// SetPortRange sets the PortRange field's value.
    +func (s *CreateNetworkAclEntryInput) SetPortRange(v *PortRange) *CreateNetworkAclEntryInput {
    +	s.PortRange = v
    +	return s
    +}
    +
    +// SetProtocol sets the Protocol field's value.
    +func (s *CreateNetworkAclEntryInput) SetProtocol(v string) *CreateNetworkAclEntryInput {
    +	s.Protocol = &v
    +	return s
    +}
    +
    +// SetRuleAction sets the RuleAction field's value.
    +func (s *CreateNetworkAclEntryInput) SetRuleAction(v string) *CreateNetworkAclEntryInput {
    +	s.RuleAction = &v
    +	return s
    +}
    +
    +// SetRuleNumber sets the RuleNumber field's value.
    +func (s *CreateNetworkAclEntryInput) SetRuleNumber(v int64) *CreateNetworkAclEntryInput {
    +	s.RuleNumber = &v
    +	return s
    +}
    +
    +type CreateNetworkAclEntryOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s CreateNetworkAclEntryOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateNetworkAclEntryOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for CreateNetworkAcl.
    +type CreateNetworkAclInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s CreateNetworkAclInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateNetworkAclInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *CreateNetworkAclInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "CreateNetworkAclInput"}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *CreateNetworkAclInput) SetDryRun(v bool) *CreateNetworkAclInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *CreateNetworkAclInput) SetVpcId(v string) *CreateNetworkAclInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Contains the output of CreateNetworkAcl.
    +type CreateNetworkAclOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the network ACL.
    +	NetworkAcl *NetworkAcl `locationName:"networkAcl" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s CreateNetworkAclOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s CreateNetworkAclOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNetworkAcl sets the NetworkAcl field's value.
    +func (s *CreateNetworkAclOutput) SetNetworkAcl(v *NetworkAcl) *CreateNetworkAclOutput {
    +	s.NetworkAcl = v
    +	return s
    +}
    +
    +// Contains the parameters for CreateNetworkInterface.
    +type CreateNetworkInterfaceInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A description for the network interface.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The IDs of one or more security groups.
    +	Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
    +
    +	// The primary private IP address of the network interface. If you don't specify
    +	// an IP address, Amazon EC2 selects one for you from the subnet range. If you
    +	// specify an IP address, you cannot indicate any IP addresses specified in
    +	// privateIpAddresses as primary (only one IP address can be designated as primary).
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// One or more private IP addresses.
    +	PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddresses" locationNameList:"item" type:"list"`
    +
    +	// The number of secondary private IP addresses to assign to a network interface.
    +	// When you specify a number of secondary IP addresses, Amazon EC2 selects these
    +	// IP addresses within the subnet range. You can't specify this option and specify
    +	// more than one private IP address using privateIpAddresses.
    +	//
    +	// The number of IP addresses you can assign to a network interface varies by
    +	// instance type. For more information, see Private IP Addresses Per ENI Per
    +	// Instance Type (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"`
    +
    +	// The ID of the subnet to associate with the network interface.
    +	//
    +	// SubnetId is a required field
    +	SubnetId *string `locationName:"subnetId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
+func (s CreateNetworkInterfaceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateNetworkInterfaceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only the presence of the required field (SubnetId) is checked; no value
+// format validation is performed here. Each non-nil entry of
+// PrivateIpAddresses is validated recursively, with its errors nested under
+// an indexed path such as "PrivateIpAddresses[0]".
+func (s *CreateNetworkInterfaceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateNetworkInterfaceInput"}
+	if s.SubnetId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
+	}
+	if s.PrivateIpAddresses != nil {
+		for i, v := range s.PrivateIpAddresses {
+			if v == nil {
+				// nil slice entries are tolerated, not reported as errors
+				continue
+			}
+			// NOTE(review): nested Validate is assumed to always return a
+			// request.ErrInvalidParams (SDK generator convention), which
+			// makes the type assertion below safe — confirm against the
+			// generator templates.
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The Set* methods below return the receiver so calls can be chained when
+// building a request (generated fluent setters).
+
+// SetDescription sets the Description field's value.
+func (s *CreateNetworkInterfaceInput) SetDescription(v string) *CreateNetworkInterfaceInput {
+	s.Description = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateNetworkInterfaceInput) SetDryRun(v bool) *CreateNetworkInterfaceInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetGroups sets the Groups field's value.
+func (s *CreateNetworkInterfaceInput) SetGroups(v []*string) *CreateNetworkInterfaceInput {
+	s.Groups = v
+	return s
+}
+
+// SetPrivateIpAddress sets the PrivateIpAddress field's value.
+func (s *CreateNetworkInterfaceInput) SetPrivateIpAddress(v string) *CreateNetworkInterfaceInput {
+	s.PrivateIpAddress = &v
+	return s
+}
+
+// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
+func (s *CreateNetworkInterfaceInput) SetPrivateIpAddresses(v []*PrivateIpAddressSpecification) *CreateNetworkInterfaceInput {
+	s.PrivateIpAddresses = v
+	return s
+}
+
+// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value.
+func (s *CreateNetworkInterfaceInput) SetSecondaryPrivateIpAddressCount(v int64) *CreateNetworkInterfaceInput {
+	s.SecondaryPrivateIpAddressCount = &v
+	return s
+}
+
+// SetSubnetId sets the SubnetId field's value.
+func (s *CreateNetworkInterfaceInput) SetSubnetId(v string) *CreateNetworkInterfaceInput {
+	s.SubnetId = &v
+	return s
+}
    +
+// Contains the output of CreateNetworkInterface.
+type CreateNetworkInterfaceOutput struct {
+	// Unexported sentinel field; its tag describes the shape to the SDK's
+	// reflection-based marshalers and carries no data (generated-code
+	// convention).
+	_ struct{} `type:"structure"`
+
+	// Information about the network interface.
+	NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateNetworkInterfaceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateNetworkInterfaceOutput) GoString() string {
+	return s.String()
+}
+
+// SetNetworkInterface sets the NetworkInterface field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface) *CreateNetworkInterfaceOutput {
+	s.NetworkInterface = v
+	return s
+}
    +
+// Contains the parameters for CreatePlacementGroup.
+type CreatePlacementGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// A name for the placement group.
+	//
+	// Constraints: Up to 255 ASCII characters
+	//
+	// GroupName is a required field
+	GroupName *string `locationName:"groupName" type:"string" required:"true"`
+
+	// The placement strategy.
+	//
+	// Strategy is a required field
+	Strategy *string `locationName:"strategy" type:"string" required:"true" enum:"PlacementStrategy"`
+}
+
+// String returns the string representation
+func (s CreatePlacementGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlacementGroupInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required fields (GroupName, Strategy) is checked;
+// no value-format validation is performed here.
+func (s *CreatePlacementGroupInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreatePlacementGroupInput"}
+	if s.GroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("GroupName"))
+	}
+	if s.Strategy == nil {
+		invalidParams.Add(request.NewErrParamRequired("Strategy"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreatePlacementGroupInput) SetDryRun(v bool) *CreatePlacementGroupInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetGroupName sets the GroupName field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreatePlacementGroupInput) SetGroupName(v string) *CreatePlacementGroupInput {
+	s.GroupName = &v
+	return s
+}
+
+// SetStrategy sets the Strategy field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreatePlacementGroupInput) SetStrategy(v string) *CreatePlacementGroupInput {
+	s.Strategy = &v
+	return s
+}
    +
+// CreatePlacementGroupOutput is the (empty) response shape for the
+// CreatePlacementGroup operation; the API returns no data on success.
+type CreatePlacementGroupOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreatePlacementGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePlacementGroupOutput) GoString() string {
+	return s.String()
+}
    +
+// Contains the parameters for CreateReservedInstancesListing.
+type CreateReservedInstancesListingInput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique, case-sensitive identifier you provide to ensure idempotency of your
+	// listings. This helps avoid duplicate listings. For more information, see
+	// Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+	//
+	// ClientToken is a required field
+	ClientToken *string `locationName:"clientToken" type:"string" required:"true"`
+
+	// The number of instances that are a part of a Reserved Instance account to
+	// be listed in the Reserved Instance Marketplace. This number should be less
+	// than or equal to the instance count associated with the Reserved Instance
+	// ID specified in this call.
+	//
+	// InstanceCount is a required field
+	InstanceCount *int64 `locationName:"instanceCount" type:"integer" required:"true"`
+
+	// A list specifying the price of the Standard Reserved Instance for each month
+	// remaining in the Reserved Instance term.
+	//
+	// PriceSchedules is a required field
+	PriceSchedules []*PriceScheduleSpecification `locationName:"priceSchedules" locationNameList:"item" type:"list" required:"true"`
+
+	// The ID of the active Standard Reserved Instance.
+	//
+	// ReservedInstancesId is a required field
+	ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateReservedInstancesListingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReservedInstancesListingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required fields (ClientToken, InstanceCount,
+// PriceSchedules, ReservedInstancesId) is checked; entries inside
+// PriceSchedules are not validated here.
+func (s *CreateReservedInstancesListingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateReservedInstancesListingInput"}
+	if s.ClientToken == nil {
+		invalidParams.Add(request.NewErrParamRequired("ClientToken"))
+	}
+	if s.InstanceCount == nil {
+		invalidParams.Add(request.NewErrParamRequired("InstanceCount"))
+	}
+	if s.PriceSchedules == nil {
+		invalidParams.Add(request.NewErrParamRequired("PriceSchedules"))
+	}
+	if s.ReservedInstancesId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ReservedInstancesId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateReservedInstancesListingInput) SetClientToken(v string) *CreateReservedInstancesListingInput {
+	s.ClientToken = &v
+	return s
+}
+
+// SetInstanceCount sets the InstanceCount field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateReservedInstancesListingInput) SetInstanceCount(v int64) *CreateReservedInstancesListingInput {
+	s.InstanceCount = &v
+	return s
+}
+
+// SetPriceSchedules sets the PriceSchedules field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateReservedInstancesListingInput) SetPriceSchedules(v []*PriceScheduleSpecification) *CreateReservedInstancesListingInput {
+	s.PriceSchedules = v
+	return s
+}
+
+// SetReservedInstancesId sets the ReservedInstancesId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateReservedInstancesListingInput) SetReservedInstancesId(v string) *CreateReservedInstancesListingInput {
+	s.ReservedInstancesId = &v
+	return s
+}
    +
+// Contains the output of CreateReservedInstancesListing.
+type CreateReservedInstancesListingOutput struct {
+	// Unexported sentinel field; its tag describes the shape to the SDK's
+	// marshalers and carries no data (generated-code convention).
+	_ struct{} `type:"structure"`
+
+	// Information about the Standard Reserved Instance listing.
+	ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateReservedInstancesListingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReservedInstancesListingOutput) GoString() string {
+	return s.String()
+}
+
+// SetReservedInstancesListings sets the ReservedInstancesListings field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateReservedInstancesListingOutput) SetReservedInstancesListings(v []*ReservedInstancesListing) *CreateReservedInstancesListingOutput {
+	s.ReservedInstancesListings = v
+	return s
+}
    +
+// Contains the parameters for CreateRoute.
+//
+// Exactly one target (gateway, instance, NAT gateway, network interface, or
+// VPC peering connection) is expected by the service; this shape does not
+// enforce that locally.
+type CreateRouteInput struct {
+	_ struct{} `type:"structure"`
+
+	// The CIDR address block used for the destination match. Routing decisions
+	// are based on the most specific match.
+	//
+	// DestinationCidrBlock is a required field
+	DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of an Internet gateway or virtual private gateway attached to your
+	// VPC.
+	GatewayId *string `locationName:"gatewayId" type:"string"`
+
+	// The ID of a NAT instance in your VPC. The operation fails if you specify
+	// an instance ID unless exactly one network interface is attached.
+	InstanceId *string `locationName:"instanceId" type:"string"`
+
+	// The ID of a NAT gateway.
+	NatGatewayId *string `locationName:"natGatewayId" type:"string"`
+
+	// The ID of a network interface.
+	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+	// The ID of the route table for the route.
+	//
+	// RouteTableId is a required field
+	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
+
+	// The ID of a VPC peering connection.
+	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateRouteInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRouteInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required fields (DestinationCidrBlock, RouteTableId)
+// is checked; CIDR syntax is not validated here.
+func (s *CreateRouteInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateRouteInput"}
+	if s.DestinationCidrBlock == nil {
+		invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
+	}
+	if s.RouteTableId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetDestinationCidrBlock(v string) *CreateRouteInput {
+	s.DestinationCidrBlock = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetDryRun(v bool) *CreateRouteInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetGatewayId sets the GatewayId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetGatewayId(v string) *CreateRouteInput {
+	s.GatewayId = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetInstanceId(v string) *CreateRouteInput {
+	s.InstanceId = &v
+	return s
+}
+
+// SetNatGatewayId sets the NatGatewayId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetNatGatewayId(v string) *CreateRouteInput {
+	s.NatGatewayId = &v
+	return s
+}
+
+// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetNetworkInterfaceId(v string) *CreateRouteInput {
+	s.NetworkInterfaceId = &v
+	return s
+}
+
+// SetRouteTableId sets the RouteTableId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetRouteTableId(v string) *CreateRouteInput {
+	s.RouteTableId = &v
+	return s
+}
+
+// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteInput) SetVpcPeeringConnectionId(v string) *CreateRouteInput {
+	s.VpcPeeringConnectionId = &v
+	return s
+}
    +
+// Contains the output of CreateRoute.
+type CreateRouteOutput struct {
+	// Unexported sentinel field; its tag describes the shape to the SDK's
+	// marshalers and carries no data (generated-code convention).
+	_ struct{} `type:"structure"`
+
+	// Returns true if the request succeeds; otherwise, it returns an error.
+	Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreateRouteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRouteOutput) GoString() string {
+	return s.String()
+}
+
+// SetReturn sets the Return field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteOutput) SetReturn(v bool) *CreateRouteOutput {
+	s.Return = &v
+	return s
+}
    +
+// Contains the parameters for CreateRouteTable.
+type CreateRouteTableInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of the VPC.
+	//
+	// VpcId is a required field
+	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateRouteTableInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRouteTableInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required field (VpcId) is checked.
+func (s *CreateRouteTableInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateRouteTableInput"}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteTableInput) SetDryRun(v bool) *CreateRouteTableInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteTableInput) SetVpcId(v string) *CreateRouteTableInput {
+	s.VpcId = &v
+	return s
+}
    +
+// Contains the output of CreateRouteTable.
+type CreateRouteTableOutput struct {
+	// Unexported sentinel field; its tag describes the shape to the SDK's
+	// marshalers and carries no data (generated-code convention).
+	_ struct{} `type:"structure"`
+
+	// Information about the route table.
+	RouteTable *RouteTable `locationName:"routeTable" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateRouteTableOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRouteTableOutput) GoString() string {
+	return s.String()
+}
+
+// SetRouteTable sets the RouteTable field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateRouteTableOutput) SetRouteTable(v *RouteTable) *CreateRouteTableOutput {
+	s.RouteTable = v
+	return s
+}
    +
+// Contains the parameters for CreateSecurityGroup.
+type CreateSecurityGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description for the security group. This is informational only.
+	//
+	// Constraints: Up to 255 characters in length
+	//
+	// Constraints for EC2-Classic: ASCII characters
+	//
+	// Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*
+	//
+	// Description is a required field
+	//
+	// Note: despite the Go field name, this is serialized on the wire as
+	// "GroupDescription" (see the locationName tag).
+	Description *string `locationName:"GroupDescription" type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The name of the security group.
+	//
+	// Constraints: Up to 255 characters in length
+	//
+	// Constraints for EC2-Classic: ASCII characters
+	//
+	// Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*
+	//
+	// GroupName is a required field
+	GroupName *string `type:"string" required:"true"`
+
+	// [EC2-VPC] The ID of the VPC. Required for EC2-VPC.
+	VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateSecurityGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSecurityGroupInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required fields (Description, GroupName) is checked;
+// VpcId is required only for EC2-VPC and is not enforced here.
+func (s *CreateSecurityGroupInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateSecurityGroupInput"}
+	if s.Description == nil {
+		invalidParams.Add(request.NewErrParamRequired("Description"))
+	}
+	if s.GroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("GroupName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDescription sets the Description field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSecurityGroupInput) SetDescription(v string) *CreateSecurityGroupInput {
+	s.Description = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSecurityGroupInput) SetDryRun(v bool) *CreateSecurityGroupInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetGroupName sets the GroupName field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSecurityGroupInput) SetGroupName(v string) *CreateSecurityGroupInput {
+	s.GroupName = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSecurityGroupInput) SetVpcId(v string) *CreateSecurityGroupInput {
+	s.VpcId = &v
+	return s
+}
    +
+// Contains the output of CreateSecurityGroup.
+type CreateSecurityGroupOutput struct {
+	// Unexported sentinel field; its tag describes the shape to the SDK's
+	// marshalers and carries no data (generated-code convention).
+	_ struct{} `type:"structure"`
+
+	// The ID of the security group.
+	GroupId *string `locationName:"groupId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateSecurityGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSecurityGroupOutput) GoString() string {
+	return s.String()
+}
+
+// SetGroupId sets the GroupId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSecurityGroupOutput) SetGroupId(v string) *CreateSecurityGroupOutput {
+	s.GroupId = &v
+	return s
+}
    +
+// Contains the parameters for CreateSnapshot.
+type CreateSnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description for the snapshot.
+	Description *string `type:"string"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of the EBS volume.
+	//
+	// VolumeId is a required field
+	VolumeId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateSnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSnapshotInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required field (VolumeId) is checked.
+func (s *CreateSnapshotInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"}
+	if s.VolumeId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VolumeId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDescription sets the Description field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSnapshotInput) SetDescription(v string) *CreateSnapshotInput {
+	s.Description = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSnapshotInput) SetDryRun(v bool) *CreateSnapshotInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetVolumeId sets the VolumeId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSnapshotInput) SetVolumeId(v string) *CreateSnapshotInput {
+	s.VolumeId = &v
+	return s
+}
    +
+// Contains the parameters for CreateSpotDatafeedSubscription.
+type CreateSpotDatafeedSubscriptionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon S3 bucket in which to store the Spot instance data feed.
+	//
+	// Bucket is a required field
+	Bucket *string `locationName:"bucket" type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// A prefix for the data feed file names.
+	Prefix *string `locationName:"prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateSpotDatafeedSubscriptionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSpotDatafeedSubscriptionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required field (Bucket) is checked.
+func (s *CreateSpotDatafeedSubscriptionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateSpotDatafeedSubscriptionInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSpotDatafeedSubscriptionInput) SetBucket(v string) *CreateSpotDatafeedSubscriptionInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSpotDatafeedSubscriptionInput) SetDryRun(v bool) *CreateSpotDatafeedSubscriptionInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSpotDatafeedSubscriptionInput) SetPrefix(v string) *CreateSpotDatafeedSubscriptionInput {
+	s.Prefix = &v
+	return s
+}
    +
+// Contains the output of CreateSpotDatafeedSubscription.
+type CreateSpotDatafeedSubscriptionOutput struct {
+	// Unexported sentinel field; its tag describes the shape to the SDK's
+	// marshalers and carries no data (generated-code convention).
+	_ struct{} `type:"structure"`
+
+	// The Spot instance data feed subscription.
+	SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateSpotDatafeedSubscriptionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSpotDatafeedSubscriptionOutput) GoString() string {
+	return s.String()
+}
+
+// SetSpotDatafeedSubscription sets the SpotDatafeedSubscription field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v *SpotDatafeedSubscription) *CreateSpotDatafeedSubscriptionOutput {
+	s.SpotDatafeedSubscription = v
+	return s
+}
    +
+// Contains the parameters for CreateSubnet.
+type CreateSubnetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zone for the subnet.
+	//
+	// Default: AWS selects one for you. If you create more than one subnet in your
+	// VPC, we may not necessarily select a different zone for each subnet.
+	AvailabilityZone *string `type:"string"`
+
+	// The network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.
+	//
+	// CidrBlock is a required field
+	CidrBlock *string `type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of the VPC.
+	//
+	// VpcId is a required field
+	VpcId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateSubnetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSubnetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required fields (CidrBlock, VpcId) is checked; CIDR
+// syntax is not validated here.
+func (s *CreateSubnetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateSubnetInput"}
+	if s.CidrBlock == nil {
+		invalidParams.Add(request.NewErrParamRequired("CidrBlock"))
+	}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSubnetInput) SetAvailabilityZone(v string) *CreateSubnetInput {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetCidrBlock sets the CidrBlock field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSubnetInput) SetCidrBlock(v string) *CreateSubnetInput {
+	s.CidrBlock = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSubnetInput) SetDryRun(v bool) *CreateSubnetInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSubnetInput) SetVpcId(v string) *CreateSubnetInput {
+	s.VpcId = &v
+	return s
+}
    +
+// Contains the output of CreateSubnet.
+type CreateSubnetOutput struct {
+	// Unexported sentinel field; its tag describes the shape to the SDK's
+	// marshalers and carries no data (generated-code convention).
+	_ struct{} `type:"structure"`
+
+	// Information about the subnet.
+	Subnet *Subnet `locationName:"subnet" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateSubnetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateSubnetOutput) GoString() string {
+	return s.String()
+}
+
+// SetSubnet sets the Subnet field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateSubnetOutput) SetSubnet(v *Subnet) *CreateSubnetOutput {
+	s.Subnet = v
+	return s
+}
    +
+// Contains the parameters for CreateTags.
+type CreateTagsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The IDs of one or more resources to tag. For example, ami-1a2b3c4d.
+	//
+	// Resources is a required field
+	Resources []*string `locationName:"ResourceId" type:"list" required:"true"`
+
+	// One or more tags. The value parameter is required, but if you don't want
+	// the tag to have a value, specify the parameter with no value, and we set
+	// the value to an empty string.
+	//
+	// Tags is a required field
+	Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTagsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only presence of the required fields (Resources, Tags) is checked; note
+// that an empty non-nil slice passes this check.
+func (s *CreateTagsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"}
+	if s.Resources == nil {
+		invalidParams.Add(request.NewErrParamRequired("Resources"))
+	}
+	if s.Tags == nil {
+		invalidParams.Add(request.NewErrParamRequired("Tags"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateTagsInput) SetDryRun(v bool) *CreateTagsInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetResources sets the Resources field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateTagsInput) SetResources(v []*string) *CreateTagsInput {
+	s.Resources = v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+// It returns the receiver to allow call chaining.
+func (s *CreateTagsInput) SetTags(v []*Tag) *CreateTagsInput {
+	s.Tags = v
+	return s
+}
    +
+// CreateTagsOutput is the (empty) response shape for the CreateTags
+// operation; the API returns no data on success.
+type CreateTagsOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTagsOutput) GoString() string {
+	return s.String()
+}
    +
+// Contains the parameters for CreateVolume.
+//
+// NOTE(review): generated code — the numeric limits quoted in the field
+// comments below (IOPS ratio, size ranges) reflect the API model at
+// generation time and may not match current EBS limits; verify against the
+// current EC2 API reference before relying on them.
+type CreateVolumeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zone in which to create the volume. Use DescribeAvailabilityZones
+	// to list the Availability Zones that are currently available to you.
+	//
+	// AvailabilityZone is a required field
+	AvailabilityZone *string `type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// Specifies whether the volume should be encrypted. Encrypted Amazon EBS volumes
+	// may only be attached to instances that support Amazon EBS encryption. Volumes
+	// that are created from encrypted snapshots are automatically encrypted. There
+	// is no way to create an encrypted volume from an unencrypted snapshot or vice
+	// versa. If your AMI uses encrypted volumes, you can only launch it on supported
+	// instance types. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
+	Encrypted *bool `locationName:"encrypted" type:"boolean"`
+
+	// Only valid for Provisioned IOPS SSD volumes. The number of I/O operations
+	// per second (IOPS) to provision for the volume, with a maximum ratio of 30
+	// IOPS/GiB.
+	//
+	// Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes
+	Iops *int64 `type:"integer"`
+
+	// The full ARN of the AWS Key Management Service (AWS KMS) customer master
+	// key (CMK) to use when creating the encrypted volume. This parameter is only
+	// required if you want to use a non-default CMK; if this parameter is not specified,
+	// the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace,
+	// followed by the region of the CMK, the AWS account ID of the CMK owner, the
+	// key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
+	// If a KmsKeyId is specified, the Encrypted flag must also be set.
+	KmsKeyId *string `type:"string"`
+
+	// The size of the volume, in GiBs.
+	//
+	// Constraints: 1-16384 for gp2, 4-16384 for io1, 500-16384 for st1, 500-16384
+	// for sc1, and 1-1024 for standard. If you specify a snapshot, the volume size
+	// must be equal to or larger than the snapshot size.
+	//
+	// Default: If you're creating the volume from a snapshot and don't specify
+	// a volume size, the default is the snapshot size.
+	Size *int64 `type:"integer"`
+
+	// The snapshot from which to create the volume.
+	SnapshotId *string `type:"string"`
+
+	// The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned
+	// IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard
+	// for Magnetic volumes.
+	//
+	// Default: standard
+	VolumeType *string `type:"string" enum:"VolumeType"`
+}
+
+// String returns the string representation
+func (s CreateVolumeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVolumeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Only AvailabilityZone is checked; all other fields are optional here.
+func (s *CreateVolumeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateVolumeInput"}
+	if s.AvailabilityZone == nil {
+		invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateVolumeInput) SetAvailabilityZone(v string) *CreateVolumeInput {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVolumeInput) SetDryRun(v bool) *CreateVolumeInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetEncrypted sets the Encrypted field's value.
+func (s *CreateVolumeInput) SetEncrypted(v bool) *CreateVolumeInput {
+	s.Encrypted = &v
+	return s
+}
+
+// SetIops sets the Iops field's value.
+func (s *CreateVolumeInput) SetIops(v int64) *CreateVolumeInput {
+	s.Iops = &v
+	return s
+}
+
+// SetKmsKeyId sets the KmsKeyId field's value.
+func (s *CreateVolumeInput) SetKmsKeyId(v string) *CreateVolumeInput {
+	s.KmsKeyId = &v
+	return s
+}
+
+// SetSize sets the Size field's value.
+func (s *CreateVolumeInput) SetSize(v int64) *CreateVolumeInput {
+	s.Size = &v
+	return s
+}
+
+// SetSnapshotId sets the SnapshotId field's value.
+func (s *CreateVolumeInput) SetSnapshotId(v string) *CreateVolumeInput {
+	s.SnapshotId = &v
+	return s
+}
+
+// SetVolumeType sets the VolumeType field's value.
+func (s *CreateVolumeInput) SetVolumeType(v string) *CreateVolumeInput {
+	s.VolumeType = &v
+	return s
+}
    +
+// Describes the user or group to be added or removed from the permissions for
+// a volume. Exactly one of Group or UserId is typically set per entry —
+// NOTE(review): not enforced here; confirm against the EC2 API reference.
+type CreateVolumePermission struct {
+	_ struct{} `type:"structure"`
+
+	// The specific group that is to be added or removed from a volume's list of
+	// create volume permissions.
+	Group *string `locationName:"group" type:"string" enum:"PermissionGroup"`
+
+	// The specific AWS account ID that is to be added or removed from a volume's
+	// list of create volume permissions.
+	UserId *string `locationName:"userId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateVolumePermission) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVolumePermission) GoString() string {
+	return s.String()
+}
+
+// SetGroup sets the Group field's value.
+func (s *CreateVolumePermission) SetGroup(v string) *CreateVolumePermission {
+	s.Group = &v
+	return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *CreateVolumePermission) SetUserId(v string) *CreateVolumePermission {
+	s.UserId = &v
+	return s
+}
+
+// Describes modifications to the permissions for a volume.
+type CreateVolumePermissionModifications struct {
+	_ struct{} `type:"structure"`
+
+	// Adds a specific AWS account ID or group to a volume's list of create volume
+	// permissions.
+	Add []*CreateVolumePermission `locationNameList:"item" type:"list"`
+
+	// Removes a specific AWS account ID or group from a volume's list of create
+	// volume permissions.
+	Remove []*CreateVolumePermission `locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateVolumePermissionModifications) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVolumePermissionModifications) GoString() string {
+	return s.String()
+}
+
+// SetAdd sets the Add field's value.
+func (s *CreateVolumePermissionModifications) SetAdd(v []*CreateVolumePermission) *CreateVolumePermissionModifications {
+	s.Add = v
+	return s
+}
+
+// SetRemove sets the Remove field's value.
+func (s *CreateVolumePermissionModifications) SetRemove(v []*CreateVolumePermission) *CreateVolumePermissionModifications {
+	s.Remove = v
+	return s
+}
    +
+// Contains the parameters for CreateVpcEndpoint.
+// NOTE(review): generated aws-sdk-go shape — edit via regeneration, not by hand.
+type CreateVpcEndpointInput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique, case-sensitive identifier you provide to ensure the idempotency of
+	// the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
+	ClientToken *string `type:"string"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// A policy to attach to the endpoint that controls access to the service. The
+	// policy must be in valid JSON format. If this parameter is not specified,
+	// we attach a default policy that allows full access to the service.
+	PolicyDocument *string `type:"string"`
+
+	// One or more route table IDs.
+	RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"`
+
+	// The AWS service name, in the form com.amazonaws.region.service. To get a
+	// list of available services, use the DescribeVpcEndpointServices request.
+	//
+	// ServiceName is a required field
+	ServiceName *string `type:"string" required:"true"`
+
+	// The ID of the VPC in which the endpoint will be used.
+	//
+	// VpcId is a required field
+	VpcId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateVpcEndpointInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcEndpointInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// ServiceName and VpcId are the only required fields.
+func (s *CreateVpcEndpointInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateVpcEndpointInput"}
+	if s.ServiceName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ServiceName"))
+	}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateVpcEndpointInput) SetClientToken(v string) *CreateVpcEndpointInput {
+	s.ClientToken = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVpcEndpointInput) SetDryRun(v bool) *CreateVpcEndpointInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetPolicyDocument sets the PolicyDocument field's value.
+func (s *CreateVpcEndpointInput) SetPolicyDocument(v string) *CreateVpcEndpointInput {
+	s.PolicyDocument = &v
+	return s
+}
+
+// SetRouteTableIds sets the RouteTableIds field's value.
+func (s *CreateVpcEndpointInput) SetRouteTableIds(v []*string) *CreateVpcEndpointInput {
+	s.RouteTableIds = v
+	return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *CreateVpcEndpointInput) SetServiceName(v string) *CreateVpcEndpointInput {
+	s.ServiceName = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *CreateVpcEndpointInput) SetVpcId(v string) *CreateVpcEndpointInput {
+	s.VpcId = &v
+	return s
+}
+
+// Contains the output of CreateVpcEndpoint.
+type CreateVpcEndpointOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Unique, case-sensitive identifier you provide to ensure the idempotency of
+	// the request.
+	ClientToken *string `locationName:"clientToken" type:"string"`
+
+	// Information about the endpoint.
+	VpcEndpoint *VpcEndpoint `locationName:"vpcEndpoint" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpcEndpointOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcEndpointOutput) GoString() string {
+	return s.String()
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *CreateVpcEndpointOutput) SetClientToken(v string) *CreateVpcEndpointOutput {
+	s.ClientToken = &v
+	return s
+}
+
+// SetVpcEndpoint sets the VpcEndpoint field's value.
+func (s *CreateVpcEndpointOutput) SetVpcEndpoint(v *VpcEndpoint) *CreateVpcEndpointOutput {
+	s.VpcEndpoint = v
+	return s
+}
    +
+// Contains the parameters for CreateVpc.
+// NOTE(review): generated aws-sdk-go shape — edit via regeneration, not by hand.
+type CreateVpcInput struct {
+	_ struct{} `type:"structure"`
+
+	// The network range for the VPC, in CIDR notation. For example, 10.0.0.0/16.
+	//
+	// CidrBlock is a required field
+	CidrBlock *string `type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The tenancy options for instances launched into the VPC. For default, instances
+	// are launched with shared tenancy by default. You can launch instances with
+	// any tenancy into a shared tenancy VPC. For dedicated, instances are launched
+	// as dedicated tenancy instances by default. You can only launch instances
+	// with a tenancy of dedicated or host into a dedicated tenancy VPC.
+	//
+	// Important: The host value cannot be used with this parameter. Use the default
+	// or dedicated values only.
+	//
+	// Default: default
+	InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"`
+}
+
+// String returns the string representation
+func (s CreateVpcInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// CidrBlock is the only required field; CIDR syntax itself is not checked here.
+func (s *CreateVpcInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateVpcInput"}
+	if s.CidrBlock == nil {
+		invalidParams.Add(request.NewErrParamRequired("CidrBlock"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCidrBlock sets the CidrBlock field's value.
+func (s *CreateVpcInput) SetCidrBlock(v string) *CreateVpcInput {
+	s.CidrBlock = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVpcInput) SetDryRun(v bool) *CreateVpcInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetInstanceTenancy sets the InstanceTenancy field's value.
+func (s *CreateVpcInput) SetInstanceTenancy(v string) *CreateVpcInput {
+	s.InstanceTenancy = &v
+	return s
+}
+
+// Contains the output of CreateVpc.
+type CreateVpcOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the VPC.
+	Vpc *Vpc `locationName:"vpc" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpcOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpc sets the Vpc field's value.
+func (s *CreateVpcOutput) SetVpc(v *Vpc) *CreateVpcOutput {
+	s.Vpc = v
+	return s
+}
    +
+// Contains the parameters for CreateVpcPeeringConnection.
+// NOTE(review): generated shape; no Validate method is emitted because the
+// API model marks no field of this request as required.
+type CreateVpcPeeringConnectionInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The AWS account ID of the owner of the peer VPC.
+	//
+	// Default: Your AWS account ID
+	PeerOwnerId *string `locationName:"peerOwnerId" type:"string"`
+
+	// The ID of the VPC with which you are creating the VPC peering connection.
+	PeerVpcId *string `locationName:"peerVpcId" type:"string"`
+
+	// The ID of the requester VPC.
+	VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateVpcPeeringConnectionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcPeeringConnectionInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVpcPeeringConnectionInput) SetDryRun(v bool) *CreateVpcPeeringConnectionInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetPeerOwnerId sets the PeerOwnerId field's value.
+func (s *CreateVpcPeeringConnectionInput) SetPeerOwnerId(v string) *CreateVpcPeeringConnectionInput {
+	s.PeerOwnerId = &v
+	return s
+}
+
+// SetPeerVpcId sets the PeerVpcId field's value.
+func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeringConnectionInput {
+	s.PeerVpcId = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *CreateVpcPeeringConnectionInput) SetVpcId(v string) *CreateVpcPeeringConnectionInput {
+	s.VpcId = &v
+	return s
+}
+
+// Contains the output of CreateVpcPeeringConnection.
+type CreateVpcPeeringConnectionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the VPC peering connection.
+	VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpcPeeringConnectionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpcPeeringConnectionOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpcPeeringConnection sets the VpcPeeringConnection field's value.
+func (s *CreateVpcPeeringConnectionOutput) SetVpcPeeringConnection(v *VpcPeeringConnection) *CreateVpcPeeringConnectionOutput {
+	s.VpcPeeringConnection = v
+	return s
+}
    +
+// Contains the parameters for CreateVpnConnection.
+// NOTE(review): generated aws-sdk-go shape — edit via regeneration, not by hand.
+type CreateVpnConnectionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the customer gateway.
+	//
+	// CustomerGatewayId is a required field
+	CustomerGatewayId *string `type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// Indicates whether the VPN connection requires static routes. If you are creating
+	// a VPN connection for a device that does not support BGP, you must specify
+	// true.
+	//
+	// Default: false
+	Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"`
+
+	// The type of VPN connection (ipsec.1).
+	//
+	// Type is a required field
+	Type *string `type:"string" required:"true"`
+
+	// The ID of the virtual private gateway.
+	//
+	// VpnGatewayId is a required field
+	VpnGatewayId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateVpnConnectionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnConnectionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// CustomerGatewayId, Type, and VpnGatewayId are all required.
+func (s *CreateVpnConnectionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateVpnConnectionInput"}
+	if s.CustomerGatewayId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CustomerGatewayId"))
+	}
+	if s.Type == nil {
+		invalidParams.Add(request.NewErrParamRequired("Type"))
+	}
+	if s.VpnGatewayId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpnGatewayId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCustomerGatewayId sets the CustomerGatewayId field's value.
+func (s *CreateVpnConnectionInput) SetCustomerGatewayId(v string) *CreateVpnConnectionInput {
+	s.CustomerGatewayId = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVpnConnectionInput) SetDryRun(v bool) *CreateVpnConnectionInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetOptions sets the Options field's value.
+func (s *CreateVpnConnectionInput) SetOptions(v *VpnConnectionOptionsSpecification) *CreateVpnConnectionInput {
+	s.Options = v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *CreateVpnConnectionInput) SetType(v string) *CreateVpnConnectionInput {
+	s.Type = &v
+	return s
+}
+
+// SetVpnGatewayId sets the VpnGatewayId field's value.
+func (s *CreateVpnConnectionInput) SetVpnGatewayId(v string) *CreateVpnConnectionInput {
+	s.VpnGatewayId = &v
+	return s
+}
+
+// Contains the output of CreateVpnConnection.
+type CreateVpnConnectionOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the VPN connection.
+	VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpnConnectionOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnConnectionOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpnConnection sets the VpnConnection field's value.
+func (s *CreateVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *CreateVpnConnectionOutput {
+	s.VpnConnection = v
+	return s
+}
    +
+// Contains the parameters for CreateVpnConnectionRoute.
+// NOTE(review): generated aws-sdk-go shape — edit via regeneration, not by hand.
+type CreateVpnConnectionRouteInput struct {
+	_ struct{} `type:"structure"`
+
+	// The CIDR block associated with the local subnet of the customer network.
+	//
+	// DestinationCidrBlock is a required field
+	DestinationCidrBlock *string `type:"string" required:"true"`
+
+	// The ID of the VPN connection.
+	//
+	// VpnConnectionId is a required field
+	VpnConnectionId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateVpnConnectionRouteInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnConnectionRouteInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Both DestinationCidrBlock and VpnConnectionId are required.
+func (s *CreateVpnConnectionRouteInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateVpnConnectionRouteInput"}
+	if s.DestinationCidrBlock == nil {
+		invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
+	}
+	if s.VpnConnectionId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpnConnectionId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
+func (s *CreateVpnConnectionRouteInput) SetDestinationCidrBlock(v string) *CreateVpnConnectionRouteInput {
+	s.DestinationCidrBlock = &v
+	return s
+}
+
+// SetVpnConnectionId sets the VpnConnectionId field's value.
+func (s *CreateVpnConnectionRouteInput) SetVpnConnectionId(v string) *CreateVpnConnectionRouteInput {
+	s.VpnConnectionId = &v
+	return s
+}
+
+// CreateVpnConnectionRouteOutput is the (empty) response shape for the
+// CreateVpnConnectionRoute call.
+type CreateVpnConnectionRouteOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpnConnectionRouteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnConnectionRouteOutput) GoString() string {
+	return s.String()
+}
    +
+// Contains the parameters for CreateVpnGateway.
+// NOTE(review): generated aws-sdk-go shape — edit via regeneration, not by hand.
+type CreateVpnGatewayInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zone for the virtual private gateway.
+	AvailabilityZone *string `type:"string"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The type of VPN connection this virtual private gateway supports.
+	//
+	// Type is a required field
+	Type *string `type:"string" required:"true" enum:"GatewayType"`
+}
+
+// String returns the string representation
+func (s CreateVpnGatewayInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnGatewayInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// Type is the only required field; its enum membership is not checked here.
+func (s *CreateVpnGatewayInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateVpnGatewayInput"}
+	if s.Type == nil {
+		invalidParams.Add(request.NewErrParamRequired("Type"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *CreateVpnGatewayInput) SetAvailabilityZone(v string) *CreateVpnGatewayInput {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *CreateVpnGatewayInput) SetDryRun(v bool) *CreateVpnGatewayInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *CreateVpnGatewayInput) SetType(v string) *CreateVpnGatewayInput {
+	s.Type = &v
+	return s
+}
+
+// Contains the output of CreateVpnGateway.
+type CreateVpnGatewayOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the virtual private gateway.
+	VpnGateway *VpnGateway `locationName:"vpnGateway" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateVpnGatewayOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateVpnGatewayOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpnGateway sets the VpnGateway field's value.
+func (s *CreateVpnGatewayOutput) SetVpnGateway(v *VpnGateway) *CreateVpnGatewayOutput {
+	s.VpnGateway = v
+	return s
+}
    +
+// Describes a customer gateway.
+// NOTE(review): read-only response shape (all fields use locationName tags);
+// generated code — edit via regeneration, not by hand.
+type CustomerGateway struct {
+	_ struct{} `type:"structure"`
+
+	// The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number
+	// (ASN).
+	BgpAsn *string `locationName:"bgpAsn" type:"string"`
+
+	// The ID of the customer gateway.
+	CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"`
+
+	// The Internet-routable IP address of the customer gateway's outside interface.
+	IpAddress *string `locationName:"ipAddress" type:"string"`
+
+	// The current state of the customer gateway (pending | available | deleting
+	// | deleted).
+	State *string `locationName:"state" type:"string"`
+
+	// Any tags assigned to the customer gateway.
+	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+	// The type of VPN connection the customer gateway supports (ipsec.1).
+	Type *string `locationName:"type" type:"string"`
+}
+
+// String returns the string representation
+func (s CustomerGateway) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CustomerGateway) GoString() string {
+	return s.String()
+}
+
+// SetBgpAsn sets the BgpAsn field's value.
+func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway {
+	s.BgpAsn = &v
+	return s
+}
+
+// SetCustomerGatewayId sets the CustomerGatewayId field's value.
+func (s *CustomerGateway) SetCustomerGatewayId(v string) *CustomerGateway {
+	s.CustomerGatewayId = &v
+	return s
+}
+
+// SetIpAddress sets the IpAddress field's value.
+func (s *CustomerGateway) SetIpAddress(v string) *CustomerGateway {
+	s.IpAddress = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *CustomerGateway) SetState(v string) *CustomerGateway {
+	s.State = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CustomerGateway) SetTags(v []*Tag) *CustomerGateway {
+	s.Tags = v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *CustomerGateway) SetType(v string) *CustomerGateway {
+	s.Type = &v
+	return s
+}
    +
+// Contains the parameters for DeleteCustomerGateway.
+// NOTE(review): generated aws-sdk-go shape — edit via regeneration, not by hand.
+type DeleteCustomerGatewayInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the customer gateway.
+	//
+	// CustomerGatewayId is a required field
+	CustomerGatewayId *string `type:"string" required:"true"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DeleteCustomerGatewayInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCustomerGatewayInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// CustomerGatewayId is the only required field.
+func (s *DeleteCustomerGatewayInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteCustomerGatewayInput"}
+	if s.CustomerGatewayId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CustomerGatewayId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCustomerGatewayId sets the CustomerGatewayId field's value.
+func (s *DeleteCustomerGatewayInput) SetCustomerGatewayId(v string) *DeleteCustomerGatewayInput {
+	s.CustomerGatewayId = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeleteCustomerGatewayInput) SetDryRun(v bool) *DeleteCustomerGatewayInput {
+	s.DryRun = &v
+	return s
+}
+
+// DeleteCustomerGatewayOutput is the (empty) response shape for the
+// DeleteCustomerGateway call.
+type DeleteCustomerGatewayOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteCustomerGatewayOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteCustomerGatewayOutput) GoString() string {
+	return s.String()
+}
    +
    +// Contains the parameters for DeleteDhcpOptions.
    +type DeleteDhcpOptionsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the DHCP options set.
    +	//
    +	// DhcpOptionsId is a required field
    +	DhcpOptionsId *string `type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteDhcpOptionsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteDhcpOptionsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteDhcpOptionsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteDhcpOptionsInput"}
    +	if s.DhcpOptionsId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DhcpOptionsId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDhcpOptionsId sets the DhcpOptionsId field's value.
    +func (s *DeleteDhcpOptionsInput) SetDhcpOptionsId(v string) *DeleteDhcpOptionsInput {
    +	s.DhcpOptionsId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteDhcpOptionsInput) SetDryRun(v bool) *DeleteDhcpOptionsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +type DeleteDhcpOptionsOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteDhcpOptionsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteDhcpOptionsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteFlowLogs.
    +type DeleteFlowLogsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more flow log IDs.
    +	//
    +	// FlowLogIds is a required field
    +	FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteFlowLogsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteFlowLogsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteFlowLogsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteFlowLogsInput"}
    +	if s.FlowLogIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("FlowLogIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetFlowLogIds sets the FlowLogIds field's value.
    +func (s *DeleteFlowLogsInput) SetFlowLogIds(v []*string) *DeleteFlowLogsInput {
    +	s.FlowLogIds = v
    +	return s
    +}
    +
    +// Contains the output of DeleteFlowLogs.
    +type DeleteFlowLogsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the flow logs that could not be deleted successfully.
    +	Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteFlowLogsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteFlowLogsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetUnsuccessful sets the Unsuccessful field's value.
    +func (s *DeleteFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteFlowLogsOutput {
    +	s.Unsuccessful = v
    +	return s
    +}
    +
    +// Contains the parameters for DeleteInternetGateway.
    +type DeleteInternetGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the Internet gateway.
    +	//
    +	// InternetGatewayId is a required field
    +	InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteInternetGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteInternetGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteInternetGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteInternetGatewayInput"}
    +	if s.InternetGatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InternetGatewayId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteInternetGatewayInput) SetDryRun(v bool) *DeleteInternetGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInternetGatewayId sets the InternetGatewayId field's value.
    +func (s *DeleteInternetGatewayInput) SetInternetGatewayId(v string) *DeleteInternetGatewayInput {
    +	s.InternetGatewayId = &v
    +	return s
    +}
    +
    +type DeleteInternetGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteInternetGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteInternetGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteKeyPair.
    +type DeleteKeyPairInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The name of the key pair.
    +	//
    +	// KeyName is a required field
    +	KeyName *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteKeyPairInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteKeyPairInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteKeyPairInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteKeyPairInput"}
    +	if s.KeyName == nil {
    +		invalidParams.Add(request.NewErrParamRequired("KeyName"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteKeyPairInput) SetDryRun(v bool) *DeleteKeyPairInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *DeleteKeyPairInput) SetKeyName(v string) *DeleteKeyPairInput {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +type DeleteKeyPairOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteKeyPairOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteKeyPairOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteNatGateway.
    +type DeleteNatGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the NAT gateway.
    +	//
    +	// NatGatewayId is a required field
    +	NatGatewayId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNatGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNatGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteNatGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteNatGatewayInput"}
    +	if s.NatGatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NatGatewayId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetNatGatewayId sets the NatGatewayId field's value.
    +func (s *DeleteNatGatewayInput) SetNatGatewayId(v string) *DeleteNatGatewayInput {
    +	s.NatGatewayId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteNatGateway.
    +type DeleteNatGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the NAT gateway.
    +	NatGatewayId *string `locationName:"natGatewayId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNatGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNatGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNatGatewayId sets the NatGatewayId field's value.
    +func (s *DeleteNatGatewayOutput) SetNatGatewayId(v string) *DeleteNatGatewayOutput {
    +	s.NatGatewayId = &v
    +	return s
    +}
    +
    +// Contains the parameters for DeleteNetworkAclEntry.
    +type DeleteNetworkAclEntryInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Indicates whether the rule is an egress rule.
    +	//
    +	// Egress is a required field
    +	Egress *bool `locationName:"egress" type:"boolean" required:"true"`
    +
    +	// The ID of the network ACL.
    +	//
    +	// NetworkAclId is a required field
    +	NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"`
    +
    +	// The rule number of the entry to delete.
    +	//
    +	// RuleNumber is a required field
    +	RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNetworkAclEntryInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNetworkAclEntryInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteNetworkAclEntryInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkAclEntryInput"}
    +	if s.Egress == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Egress"))
    +	}
    +	if s.NetworkAclId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkAclId"))
    +	}
    +	if s.RuleNumber == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RuleNumber"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteNetworkAclEntryInput) SetDryRun(v bool) *DeleteNetworkAclEntryInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEgress sets the Egress field's value.
    +func (s *DeleteNetworkAclEntryInput) SetEgress(v bool) *DeleteNetworkAclEntryInput {
    +	s.Egress = &v
    +	return s
    +}
    +
    +// SetNetworkAclId sets the NetworkAclId field's value.
    +func (s *DeleteNetworkAclEntryInput) SetNetworkAclId(v string) *DeleteNetworkAclEntryInput {
    +	s.NetworkAclId = &v
    +	return s
    +}
    +
    +// SetRuleNumber sets the RuleNumber field's value.
    +func (s *DeleteNetworkAclEntryInput) SetRuleNumber(v int64) *DeleteNetworkAclEntryInput {
    +	s.RuleNumber = &v
    +	return s
    +}
    +
    +type DeleteNetworkAclEntryOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNetworkAclEntryOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNetworkAclEntryOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteNetworkAcl.
    +type DeleteNetworkAclInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the network ACL.
    +	//
    +	// NetworkAclId is a required field
    +	NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNetworkAclInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNetworkAclInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteNetworkAclInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkAclInput"}
    +	if s.NetworkAclId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkAclId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteNetworkAclInput) SetDryRun(v bool) *DeleteNetworkAclInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetNetworkAclId sets the NetworkAclId field's value.
    +func (s *DeleteNetworkAclInput) SetNetworkAclId(v string) *DeleteNetworkAclInput {
    +	s.NetworkAclId = &v
    +	return s
    +}
    +
    +type DeleteNetworkAclOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNetworkAclOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNetworkAclOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteNetworkInterface.
    +type DeleteNetworkInterfaceInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the network interface.
    +	//
    +	// NetworkInterfaceId is a required field
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNetworkInterfaceInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNetworkInterfaceInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteNetworkInterfaceInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkInterfaceInput"}
    +	if s.NetworkInterfaceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteNetworkInterfaceInput) SetDryRun(v bool) *DeleteNetworkInterfaceInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *DeleteNetworkInterfaceInput) SetNetworkInterfaceId(v string) *DeleteNetworkInterfaceInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +type DeleteNetworkInterfaceOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteNetworkInterfaceOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteNetworkInterfaceOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeletePlacementGroup.
    +type DeletePlacementGroupInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The name of the placement group.
    +	//
    +	// GroupName is a required field
    +	GroupName *string `locationName:"groupName" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeletePlacementGroupInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeletePlacementGroupInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeletePlacementGroupInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeletePlacementGroupInput"}
    +	if s.GroupName == nil {
    +		invalidParams.Add(request.NewErrParamRequired("GroupName"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeletePlacementGroupInput) SetDryRun(v bool) *DeletePlacementGroupInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *DeletePlacementGroupInput) SetGroupName(v string) *DeletePlacementGroupInput {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +type DeletePlacementGroupOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeletePlacementGroupOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeletePlacementGroupOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteRoute.
    +type DeleteRouteInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR range for the route. The value you specify must match the CIDR for
    +	// the route exactly.
    +	//
    +	// DestinationCidrBlock is a required field
    +	DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the route table.
    +	//
    +	// RouteTableId is a required field
    +	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteRouteInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteRouteInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteRouteInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteRouteInput"}
    +	if s.DestinationCidrBlock == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
    +	}
    +	if s.RouteTableId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
    +func (s *DeleteRouteInput) SetDestinationCidrBlock(v string) *DeleteRouteInput {
    +	s.DestinationCidrBlock = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteRouteInput) SetDryRun(v bool) *DeleteRouteInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *DeleteRouteInput) SetRouteTableId(v string) *DeleteRouteInput {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +type DeleteRouteOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteRouteOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteRouteOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteRouteTable.
    +type DeleteRouteTableInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the route table.
    +	//
    +	// RouteTableId is a required field
    +	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteRouteTableInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteRouteTableInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteRouteTableInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteRouteTableInput"}
    +	if s.RouteTableId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteRouteTableInput) SetDryRun(v bool) *DeleteRouteTableInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *DeleteRouteTableInput) SetRouteTableId(v string) *DeleteRouteTableInput {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +type DeleteRouteTableOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteRouteTableOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteRouteTableOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteSecurityGroup.
    +type DeleteSecurityGroupInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the security group. Required for a nondefault VPC.
    +	GroupId *string `type:"string"`
    +
    +	// [EC2-Classic, default VPC] The name of the security group. You can specify
    +	// either the security group name or the security group ID.
    +	GroupName *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSecurityGroupInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSecurityGroupInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteSecurityGroupInput) SetDryRun(v bool) *DeleteSecurityGroupInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *DeleteSecurityGroupInput) SetGroupId(v string) *DeleteSecurityGroupInput {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *DeleteSecurityGroupInput) SetGroupName(v string) *DeleteSecurityGroupInput {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +type DeleteSecurityGroupOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSecurityGroupOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSecurityGroupOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteSnapshot.
    +type DeleteSnapshotInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the EBS snapshot.
    +	//
    +	// SnapshotId is a required field
    +	SnapshotId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSnapshotInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSnapshotInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DeleteSnapshotInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"}
    +	if s.SnapshotId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SnapshotId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteSnapshotInput) SetDryRun(v bool) *DeleteSnapshotInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *DeleteSnapshotInput) SetSnapshotId(v string) *DeleteSnapshotInput {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +type DeleteSnapshotOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSnapshotOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSnapshotOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteSpotDatafeedSubscription.
    +type DeleteSpotDatafeedSubscriptionInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSpotDatafeedSubscriptionInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSpotDatafeedSubscriptionInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteSpotDatafeedSubscriptionInput) SetDryRun(v bool) *DeleteSpotDatafeedSubscriptionInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +type DeleteSpotDatafeedSubscriptionOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSpotDatafeedSubscriptionOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteSubnet.
    +type DeleteSubnetInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the subnet.
    +	//
    +	// SubnetId is a required field
    +	SubnetId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSubnetInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSubnetInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeleteSubnetInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteSubnetInput"}
    +	if s.SubnetId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteSubnetInput) SetDryRun(v bool) *DeleteSubnetInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *DeleteSubnetInput) SetSubnetId(v string) *DeleteSubnetInput {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteSubnet (no data is returned).
    +type DeleteSubnetOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteSubnetOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteSubnetOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteTags.
    +type DeleteTagsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the resource. For example, ami-1a2b3c4d. You can specify more than
    +	// one resource ID.
    +	//
    +	// Resources is a required field
    +	Resources []*string `locationName:"resourceId" type:"list" required:"true"`
    +
    +	// One or more tags to delete. If you omit the value parameter, we delete the
    +	// tag regardless of its value. If you specify this parameter with an empty
    +	// string as the value, we delete the key only if its value is an empty string.
    +	Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteTagsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteTagsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// Only Resources is required; an empty (non-nil) slice passes this check.
    +func (s *DeleteTagsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"}
    +	if s.Resources == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Resources"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteTagsInput) SetDryRun(v bool) *DeleteTagsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetResources sets the Resources field's value.
    +func (s *DeleteTagsInput) SetResources(v []*string) *DeleteTagsInput {
    +	s.Resources = v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *DeleteTagsInput) SetTags(v []*Tag) *DeleteTagsInput {
    +	s.Tags = v
    +	return s
    +}
    +
    +// Contains the output of DeleteTags (no data is returned).
    +type DeleteTagsOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteTagsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteTagsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteVolume.
    +type DeleteVolumeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the volume.
    +	//
    +	// VolumeId is a required field
    +	VolumeId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVolumeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVolumeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeleteVolumeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteVolumeInput"}
    +	if s.VolumeId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VolumeId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteVolumeInput) SetDryRun(v bool) *DeleteVolumeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *DeleteVolumeInput) SetVolumeId(v string) *DeleteVolumeInput {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteVolume (no data is returned).
    +type DeleteVolumeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVolumeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVolumeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteVpcEndpoints.
    +type DeleteVpcEndpointsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	//
    +	// NOTE(review): unlike sibling inputs, this tag carries no locationName —
    +	// presumably intentional per the upstream API model; confirm against the
    +	// aws-sdk-go generator output rather than "fixing" it here.
    +	DryRun *bool `type:"boolean"`
    +
    +	// One or more endpoint IDs.
    +	//
    +	// VpcEndpointIds is a required field
    +	VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpcEndpointsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpcEndpointsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeleteVpcEndpointsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteVpcEndpointsInput"}
    +	if s.VpcEndpointIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcEndpointIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteVpcEndpointsInput) SetDryRun(v bool) *DeleteVpcEndpointsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcEndpointIds sets the VpcEndpointIds field's value.
    +func (s *DeleteVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DeleteVpcEndpointsInput {
    +	s.VpcEndpointIds = v
    +	return s
    +}
    +
    +// Contains the output of DeleteVpcEndpoints.
    +type DeleteVpcEndpointsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the endpoints that were not successfully deleted.
    +	Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpcEndpointsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpcEndpointsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetUnsuccessful sets the Unsuccessful field's value.
    +func (s *DeleteVpcEndpointsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteVpcEndpointsOutput {
    +	s.Unsuccessful = v
    +	return s
    +}
    +
    +// Contains the parameters for DeleteVpc.
    +type DeleteVpcInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpcInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpcInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeleteVpcInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteVpcInput"}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteVpcInput) SetDryRun(v bool) *DeleteVpcInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DeleteVpcInput) SetVpcId(v string) *DeleteVpcInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteVpc (no data is returned).
    +type DeleteVpcOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpcOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpcOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// NOTE(review): machine-generated aws-sdk-go code; change the API model /
    +// regenerate rather than hand-editing this file.
    +//
    +// Contains the parameters for DeleteVpcPeeringConnection.
    +type DeleteVpcPeeringConnectionInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC peering connection.
    +	//
    +	// VpcPeeringConnectionId is a required field
    +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpcPeeringConnectionInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpcPeeringConnectionInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeleteVpcPeeringConnectionInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteVpcPeeringConnectionInput"}
    +	if s.VpcPeeringConnectionId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteVpcPeeringConnectionInput) SetDryRun(v bool) *DeleteVpcPeeringConnectionInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *DeleteVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *DeleteVpcPeeringConnectionInput {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteVpcPeeringConnection.
    +type DeleteVpcPeeringConnectionOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Returns true if the request succeeds; otherwise, it returns an error.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpcPeeringConnectionOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpcPeeringConnectionOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *DeleteVpcPeeringConnectionOutput) SetReturn(v bool) *DeleteVpcPeeringConnectionOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for DeleteVpnConnection.
    +type DeleteVpnConnectionInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPN connection.
    +	//
    +	// VpnConnectionId is a required field
    +	VpnConnectionId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpnConnectionInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpnConnectionInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeleteVpnConnectionInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteVpnConnectionInput"}
    +	if s.VpnConnectionId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpnConnectionId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteVpnConnectionInput) SetDryRun(v bool) *DeleteVpnConnectionInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpnConnectionId sets the VpnConnectionId field's value.
    +func (s *DeleteVpnConnectionInput) SetVpnConnectionId(v string) *DeleteVpnConnectionInput {
    +	s.VpnConnectionId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteVpnConnection (no data is returned).
    +type DeleteVpnConnectionOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpnConnectionOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpnConnectionOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteVpnConnectionRoute.
    +// Note: this operation has no DryRun member, unlike most EC2 inputs here.
    +type DeleteVpnConnectionRouteInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR block associated with the local subnet of the customer network.
    +	//
    +	// DestinationCidrBlock is a required field
    +	DestinationCidrBlock *string `type:"string" required:"true"`
    +
    +	// The ID of the VPN connection.
    +	//
    +	// VpnConnectionId is a required field
    +	VpnConnectionId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpnConnectionRouteInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpnConnectionRouteInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// Both required fields are reported in a single ErrInvalidParams if missing.
    +func (s *DeleteVpnConnectionRouteInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteVpnConnectionRouteInput"}
    +	if s.DestinationCidrBlock == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
    +	}
    +	if s.VpnConnectionId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpnConnectionId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
    +func (s *DeleteVpnConnectionRouteInput) SetDestinationCidrBlock(v string) *DeleteVpnConnectionRouteInput {
    +	s.DestinationCidrBlock = &v
    +	return s
    +}
    +
    +// SetVpnConnectionId sets the VpnConnectionId field's value.
    +func (s *DeleteVpnConnectionRouteInput) SetVpnConnectionId(v string) *DeleteVpnConnectionRouteInput {
    +	s.VpnConnectionId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteVpnConnectionRoute (no data is returned).
    +type DeleteVpnConnectionRouteOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpnConnectionRouteOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpnConnectionRouteOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeleteVpnGateway.
    +type DeleteVpnGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the virtual private gateway.
    +	//
    +	// VpnGatewayId is a required field
    +	VpnGatewayId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpnGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpnGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeleteVpnGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeleteVpnGatewayInput"}
    +	if s.VpnGatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpnGatewayId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeleteVpnGatewayInput) SetDryRun(v bool) *DeleteVpnGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpnGatewayId sets the VpnGatewayId field's value.
    +func (s *DeleteVpnGatewayInput) SetVpnGatewayId(v string) *DeleteVpnGatewayInput {
    +	s.VpnGatewayId = &v
    +	return s
    +}
    +
    +// Contains the output of DeleteVpnGateway (no data is returned).
    +type DeleteVpnGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeleteVpnGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeleteVpnGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DeregisterImage.
    +type DeregisterImageInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the AMI.
    +	//
    +	// ImageId is a required field
    +	ImageId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DeregisterImageInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeregisterImageInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +// It only checks presence of required fields, not their contents.
    +func (s *DeregisterImageInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DeregisterImageInput"}
    +	if s.ImageId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ImageId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DeregisterImageInput) SetDryRun(v bool) *DeregisterImageInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *DeregisterImageInput) SetImageId(v string) *DeregisterImageInput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// Contains the output of DeregisterImage (no data is returned).
    +type DeregisterImageOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DeregisterImageOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DeregisterImageOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// NOTE(review): machine-generated aws-sdk-go code; change the API model /
    +// regenerate rather than hand-editing this file.
    +//
    +// Contains the parameters for DescribeAccountAttributes.
    +type DescribeAccountAttributesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more account attribute names.
    +	AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeAccountAttributesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeAccountAttributesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttributeNames sets the AttributeNames field's value.
    +func (s *DescribeAccountAttributesInput) SetAttributeNames(v []*string) *DescribeAccountAttributesInput {
    +	s.AttributeNames = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeAccountAttributesInput) SetDryRun(v bool) *DescribeAccountAttributesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeAccountAttributes.
    +type DescribeAccountAttributesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more account attributes.
    +	AccountAttributes []*AccountAttribute `locationName:"accountAttributeSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeAccountAttributesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeAccountAttributesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAccountAttributes sets the AccountAttributes field's value.
    +func (s *DescribeAccountAttributesOutput) SetAccountAttributes(v []*AccountAttribute) *DescribeAccountAttributesOutput {
    +	s.AccountAttributes = v
    +	return s
    +}
    +
    +// NOTE(review): machine-generated aws-sdk-go code; change the API model /
    +// regenerate rather than hand-editing this file.
    +//
    +// Contains the parameters for DescribeAddresses.
    +type DescribeAddressesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// [EC2-VPC] One or more allocation IDs.
    +	//
    +	// Default: Describes all your Elastic IP addresses.
    +	AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters. Filter names and values are case-sensitive.
    +	//
    +	//    * allocation-id - [EC2-VPC] The allocation ID for the address.
    +	//
    +	//    * association-id - [EC2-VPC] The association ID for the address.
    +	//
    +	//    * domain - Indicates whether the address is for use in EC2-Classic (standard)
    +	//    or in a VPC (vpc).
    +	//
    +	//    * instance-id - The ID of the instance the address is associated with,
    +	//    if any.
    +	//
    +	//    * network-interface-id - [EC2-VPC] The ID of the network interface that
    +	//    the address is associated with, if any.
    +	//
    +	//    * network-interface-owner-id - The AWS account ID of the owner.
    +	//
    +	//    * private-ip-address - [EC2-VPC] The private IP address associated with
    +	//    the Elastic IP address.
    +	//
    +	//    * public-ip - The Elastic IP address.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// [EC2-Classic] One or more Elastic IP addresses.
    +	//
    +	// Default: Describes all your Elastic IP addresses.
    +	PublicIps []*string `locationName:"PublicIp" locationNameList:"PublicIp" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeAddressesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeAddressesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationIds sets the AllocationIds field's value.
    +func (s *DescribeAddressesInput) SetAllocationIds(v []*string) *DescribeAddressesInput {
    +	s.AllocationIds = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeAddressesInput) SetDryRun(v bool) *DescribeAddressesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeAddressesInput) SetFilters(v []*Filter) *DescribeAddressesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetPublicIps sets the PublicIps field's value.
    +func (s *DescribeAddressesInput) SetPublicIps(v []*string) *DescribeAddressesInput {
    +	s.PublicIps = v
    +	return s
    +}
    +
    +// Contains the output of DescribeAddresses.
    +type DescribeAddressesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more Elastic IP addresses.
    +	Addresses []*Address `locationName:"addressesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeAddressesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeAddressesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAddresses sets the Addresses field's value.
    +func (s *DescribeAddressesOutput) SetAddresses(v []*Address) *DescribeAddressesOutput {
    +	s.Addresses = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeAvailabilityZones.
    +type DescribeAvailabilityZonesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * message - Information about the Availability Zone.
    +	//
    +	//    * region-name - The name of the region for the Availability Zone (for
    +	//    example, us-east-1).
    +	//
    +	//    * state - The state of the Availability Zone (available | information
    +	//    | impaired | unavailable).
    +	//
    +	//    * zone-name - The name of the Availability Zone (for example, us-east-1a).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The names of one or more Availability Zones.
    +	ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeAvailabilityZonesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeAvailabilityZonesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeAvailabilityZonesInput) SetDryRun(v bool) *DescribeAvailabilityZonesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeAvailabilityZonesInput) SetFilters(v []*Filter) *DescribeAvailabilityZonesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetZoneNames sets the ZoneNames field's value.
    +func (s *DescribeAvailabilityZonesInput) SetZoneNames(v []*string) *DescribeAvailabilityZonesInput {
    +	s.ZoneNames = v
    +	return s
    +}
    +
    +// Contains the output of DescribeAvailabilityZones.
    +type DescribeAvailabilityZonesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more Availability Zones.
    +	AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeAvailabilityZonesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeAvailabilityZonesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZones sets the AvailabilityZones field's value.
    +func (s *DescribeAvailabilityZonesOutput) SetAvailabilityZones(v []*AvailabilityZone) *DescribeAvailabilityZonesOutput {
    +	s.AvailabilityZones = v
    +	return s
    +}
    +
    +// NOTE(review): machine-generated aws-sdk-go code; change the API model /
    +// regenerate rather than hand-editing this file.
    +//
    +// Contains the parameters for DescribeBundleTasks.
    +type DescribeBundleTasksInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more bundle task IDs.
    +	//
    +	// Default: Describes all your bundle tasks.
    +	BundleIds []*string `locationName:"BundleId" locationNameList:"BundleId" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * bundle-id - The ID of the bundle task.
    +	//
    +	//    * error-code - If the task failed, the error code returned.
    +	//
    +	//    * error-message - If the task failed, the error message returned.
    +	//
    +	//    * instance-id - The ID of the instance.
    +	//
    +	//    * progress - The level of task completion, as a percentage (for example,
    +	//    20%).
    +	//
    +	//    * s3-bucket - The Amazon S3 bucket to store the AMI.
    +	//
    +	//    * s3-prefix - The beginning of the AMI name.
    +	//
    +	//    * start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).
    +	//
    +	//    * state - The state of the task (pending | waiting-for-shutdown | bundling
    +	//    | storing | cancelling | complete | failed).
    +	//
    +	//    * update-time - The time of the most recent update for the task.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeBundleTasksInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeBundleTasksInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBundleIds sets the BundleIds field's value.
    +func (s *DescribeBundleTasksInput) SetBundleIds(v []*string) *DescribeBundleTasksInput {
    +	s.BundleIds = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeBundleTasksInput) SetDryRun(v bool) *DescribeBundleTasksInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeBundleTasksInput) SetFilters(v []*Filter) *DescribeBundleTasksInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// Contains the output of DescribeBundleTasks.
    +type DescribeBundleTasksOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more bundle tasks.
    +	BundleTasks []*BundleTask `locationName:"bundleInstanceTasksSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeBundleTasksOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeBundleTasksOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBundleTasks sets the BundleTasks field's value; it returns the receiver for chaining.
    +func (s *DescribeBundleTasksOutput) SetBundleTasks(v []*BundleTask) *DescribeBundleTasksOutput {
    +	s.BundleTasks = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeClassicLinkInstances.
    +type DescribeClassicLinkInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * group-id - The ID of a VPC security group that's associated with the
    +	//    instance.
    +	//
    +	//    * instance-id - The ID of the instance.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * vpc-id - The ID of the VPC that the instance is linked to.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more instance IDs. Must be instances linked to a VPC through ClassicLink.
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"`
    +
    +	// The maximum number of results to return for the request in a single page.
    +	// The remaining results of the initial request can be seen by sending another
    +	// request with the returned NextToken value. This value can be between 5 and
    +	// 1000; if MaxResults is given a value larger than 1000, only 1000 results
    +	// are returned. You cannot specify this parameter and the instance IDs parameter
    +	// in the same request.
    +	//
    +	// Constraint: If the value is greater than 1000, we return only 1000 items.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeClassicLinkInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeClassicLinkInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value; it returns the receiver for chaining.
    +func (s *DescribeClassicLinkInstancesInput) SetDryRun(v bool) *DescribeClassicLinkInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeClassicLinkInstancesInput) SetFilters(v []*Filter) *DescribeClassicLinkInstancesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *DescribeClassicLinkInstancesInput) SetInstanceIds(v []*string) *DescribeClassicLinkInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeClassicLinkInstancesInput) SetMaxResults(v int64) *DescribeClassicLinkInstancesInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeClassicLinkInstancesInput) SetNextToken(v string) *DescribeClassicLinkInstancesInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeClassicLinkInstances.
    +type DescribeClassicLinkInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more linked EC2-Classic instances.
    +	Instances []*ClassicLinkInstance `locationName:"instancesSet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeClassicLinkInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeClassicLinkInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstances sets the Instances field's value; it returns the receiver for chaining.
    +func (s *DescribeClassicLinkInstancesOutput) SetInstances(v []*ClassicLinkInstance) *DescribeClassicLinkInstancesOutput {
    +	s.Instances = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeClassicLinkInstancesOutput) SetNextToken(v string) *DescribeClassicLinkInstancesOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeConversionTasks.
    +type DescribeConversionTasksInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more conversion task IDs.
    +	ConversionTaskIds []*string `locationName:"conversionTaskId" locationNameList:"item" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeConversionTasksInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeConversionTasksInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetConversionTaskIds sets the ConversionTaskIds field's value; it returns the receiver for chaining.
    +func (s *DescribeConversionTasksInput) SetConversionTaskIds(v []*string) *DescribeConversionTasksInput {
    +	s.ConversionTaskIds = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeConversionTasksInput) SetDryRun(v bool) *DescribeConversionTasksInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// Contains the output for DescribeConversionTasks.
    +type DescribeConversionTasksOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the conversion tasks.
    +	ConversionTasks []*ConversionTask `locationName:"conversionTasks" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeConversionTasksOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeConversionTasksOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetConversionTasks sets the ConversionTasks field's value; it returns the receiver for chaining.
    +func (s *DescribeConversionTasksOutput) SetConversionTasks(v []*ConversionTask) *DescribeConversionTasksOutput {
    +	s.ConversionTasks = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeCustomerGateways.
    +type DescribeCustomerGatewaysInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more customer gateway IDs.
    +	//
    +	// Default: Describes all your customer gateways.
    +	CustomerGatewayIds []*string `locationName:"CustomerGatewayId" locationNameList:"CustomerGatewayId" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous
    +	//    System Number (ASN).
    +	//
    +	//    * customer-gateway-id - The ID of the customer gateway.
    +	//
    +	//    * ip-address - The IP address of the customer gateway's Internet-routable
    +	//    external interface.
    +	//
    +	//    * state - The state of the customer gateway (pending | available | deleting
    +	//    | deleted).
    +	//
    +	//    * type - The type of customer gateway. Currently, the only supported type
    +	//    is ipsec.1.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeCustomerGatewaysInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeCustomerGatewaysInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCustomerGatewayIds sets the CustomerGatewayIds field's value; it returns the receiver for chaining.
    +func (s *DescribeCustomerGatewaysInput) SetCustomerGatewayIds(v []*string) *DescribeCustomerGatewaysInput {
    +	s.CustomerGatewayIds = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeCustomerGatewaysInput) SetDryRun(v bool) *DescribeCustomerGatewaysInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeCustomerGatewaysInput) SetFilters(v []*Filter) *DescribeCustomerGatewaysInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// Contains the output of DescribeCustomerGateways.
    +type DescribeCustomerGatewaysOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more customer gateways.
    +	CustomerGateways []*CustomerGateway `locationName:"customerGatewaySet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeCustomerGatewaysOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeCustomerGatewaysOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCustomerGateways sets the CustomerGateways field's value; it returns the receiver for chaining.
    +func (s *DescribeCustomerGatewaysOutput) SetCustomerGateways(v []*CustomerGateway) *DescribeCustomerGatewaysOutput {
    +	s.CustomerGateways = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeDhcpOptions.
    +type DescribeDhcpOptionsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The IDs of one or more DHCP options sets.
    +	//
    +	// Default: Describes all your DHCP options sets.
    +	DhcpOptionsIds []*string `locationName:"DhcpOptionsId" locationNameList:"DhcpOptionsId" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * dhcp-options-id - The ID of a set of DHCP options.
    +	//
    +	//    * key - The key for one of the options (for example, domain-name).
    +	//
    +	//    * value - The value for one of the options.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeDhcpOptionsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeDhcpOptionsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDhcpOptionsIds sets the DhcpOptionsIds field's value; it returns the receiver for chaining.
    +func (s *DescribeDhcpOptionsInput) SetDhcpOptionsIds(v []*string) *DescribeDhcpOptionsInput {
    +	s.DhcpOptionsIds = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeDhcpOptionsInput) SetDryRun(v bool) *DescribeDhcpOptionsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeDhcpOptionsInput) SetFilters(v []*Filter) *DescribeDhcpOptionsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// Contains the output of DescribeDhcpOptions.
    +type DescribeDhcpOptionsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more DHCP options sets.
    +	DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeDhcpOptionsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeDhcpOptionsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDhcpOptions sets the DhcpOptions field's value; it returns the receiver for chaining.
    +func (s *DescribeDhcpOptionsOutput) SetDhcpOptions(v []*DhcpOptions) *DescribeDhcpOptionsOutput {
    +	s.DhcpOptions = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeExportTasks.
    +type DescribeExportTasksInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more export task IDs.
    +	ExportTaskIds []*string `locationName:"exportTaskId" locationNameList:"ExportTaskId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeExportTasksInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeExportTasksInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetExportTaskIds sets the ExportTaskIds field's value; it returns the receiver for chaining.
    +func (s *DescribeExportTasksInput) SetExportTaskIds(v []*string) *DescribeExportTasksInput {
    +	s.ExportTaskIds = v
    +	return s
    +}
    +
    +// Contains the output for DescribeExportTasks.
    +type DescribeExportTasksOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the export tasks.
    +	ExportTasks []*ExportTask `locationName:"exportTaskSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeExportTasksOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeExportTasksOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetExportTasks sets the ExportTasks field's value; it returns the receiver for chaining.
    +func (s *DescribeExportTasksOutput) SetExportTasks(v []*ExportTask) *DescribeExportTasksOutput {
    +	s.ExportTasks = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeFlowLogs.
    +type DescribeFlowLogsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more filters.
    +	//
    +	//    * deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).
    +	//
    +	//    * flow-log-id - The ID of the flow log.
    +	//
    +	//    * log-group-name - The name of the log group.
    +	//
    +	//    * resource-id - The ID of the VPC, subnet, or network interface.
    +	//
    +	//    * traffic-type - The type of traffic (ACCEPT | REJECT | ALL)
    +	Filter []*Filter `locationNameList:"Filter" type:"list"`
    +
    +	// One or more flow log IDs.
    +	FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"`
    +
    +	// The maximum number of results to return for the request in a single page.
    +	// The remaining results can be seen by sending another request with the returned
    +	// NextToken value. This value can be between 5 and 1000; if MaxResults is given
    +	// a value larger than 1000, only 1000 results are returned. You cannot specify
    +	// this parameter and the flow log IDs parameter in the same request.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeFlowLogsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeFlowLogsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFilter sets the Filter field's value; it returns the receiver for chaining.
    +func (s *DescribeFlowLogsInput) SetFilter(v []*Filter) *DescribeFlowLogsInput {
    +	s.Filter = v
    +	return s
    +}
    +
    +// SetFlowLogIds sets the FlowLogIds field's value.
    +func (s *DescribeFlowLogsInput) SetFlowLogIds(v []*string) *DescribeFlowLogsInput {
    +	s.FlowLogIds = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeFlowLogsInput) SetMaxResults(v int64) *DescribeFlowLogsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeFlowLogsInput) SetNextToken(v string) *DescribeFlowLogsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeFlowLogs.
    +type DescribeFlowLogsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the flow logs.
    +	FlowLogs []*FlowLog `locationName:"flowLogSet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeFlowLogsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeFlowLogsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFlowLogs sets the FlowLogs field's value; it returns the receiver for chaining.
    +func (s *DescribeFlowLogsOutput) SetFlowLogs(v []*FlowLog) *DescribeFlowLogsOutput {
    +	s.FlowLogs = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeFlowLogsOutput) SetNextToken(v string) *DescribeFlowLogsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +type DescribeHostReservationOfferingsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more filters.
    +	//
    +	//    * instance-family - The instance family of the offering (e.g., m4).
    +	//
    +	//    * payment-option - The payment option (No Upfront | Partial Upfront |
    +	//    All Upfront).
    +	Filter []*Filter `locationNameList:"Filter" type:"list"`
    +
    +	// This is the maximum duration of the reservation you'd like to purchase, specified
    +	// in seconds. Reservations are available in one-year and three-year terms.
    +	// The number of seconds specified must be the number of seconds in a year (365x24x60x60)
    +	// times one of the supported durations (1 or 3). For example, specify 94608000
    +	// for three years.
    +	MaxDuration *int64 `type:"integer"`
    +
    +	// The maximum number of results to return for the request in a single page.
    +	// The remaining results can be seen by sending another request with the returned
    +	// nextToken value. This value can be between 5 and 500; if maxResults is given
    +	// a larger value than 500, you will receive an error.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// This is the minimum duration of the reservation you'd like to purchase, specified
    +	// in seconds. Reservations are available in one-year and three-year terms.
    +	// The number of seconds specified must be the number of seconds in a year (365x24x60x60)
    +	// times one of the supported durations (1 or 3). For example, specify 31536000
    +	// for one year.
    +	MinDuration *int64 `type:"integer"`
    +
    +	// The token to use to retrieve the next page of results.
    +	NextToken *string `type:"string"`
    +
    +	// The ID of the reservation offering.
    +	OfferingId *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeHostReservationOfferingsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeHostReservationOfferingsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFilter sets the Filter field's value; it returns the receiver for chaining.
    +func (s *DescribeHostReservationOfferingsInput) SetFilter(v []*Filter) *DescribeHostReservationOfferingsInput {
    +	s.Filter = v
    +	return s
    +}
    +
    +// SetMaxDuration sets the MaxDuration field's value.
    +func (s *DescribeHostReservationOfferingsInput) SetMaxDuration(v int64) *DescribeHostReservationOfferingsInput {
    +	s.MaxDuration = &v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeHostReservationOfferingsInput) SetMaxResults(v int64) *DescribeHostReservationOfferingsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetMinDuration sets the MinDuration field's value.
    +func (s *DescribeHostReservationOfferingsInput) SetMinDuration(v int64) *DescribeHostReservationOfferingsInput {
    +	s.MinDuration = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeHostReservationOfferingsInput) SetNextToken(v string) *DescribeHostReservationOfferingsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetOfferingId sets the OfferingId field's value.
    +func (s *DescribeHostReservationOfferingsInput) SetOfferingId(v string) *DescribeHostReservationOfferingsInput {
    +	s.OfferingId = &v
    +	return s
    +}
    +
    +type DescribeHostReservationOfferingsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Information about the offerings.
    +	OfferingSet []*HostOffering `locationName:"offeringSet" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeHostReservationOfferingsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeHostReservationOfferingsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value; it returns the receiver for chaining.
    +func (s *DescribeHostReservationOfferingsOutput) SetNextToken(v string) *DescribeHostReservationOfferingsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetOfferingSet sets the OfferingSet field's value.
    +func (s *DescribeHostReservationOfferingsOutput) SetOfferingSet(v []*HostOffering) *DescribeHostReservationOfferingsOutput {
    +	s.OfferingSet = v
    +	return s
    +}
    +
    +type DescribeHostReservationsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more filters.
    +	//
    +	//    * instance-family - The instance family (e.g., m4).
    +	//
    +	//    * payment-option - The payment option (No Upfront | Partial Upfront |
    +	//    All Upfront).
    +	//
    +	//    * state - The state of the reservation (payment-pending | payment-failed
    +	//    | active | retired).
    +	Filter []*Filter `locationNameList:"Filter" type:"list"`
    +
    +	// One or more host reservation IDs.
    +	HostReservationIdSet []*string `locationNameList:"item" type:"list"`
    +
    +	// The maximum number of results to return for the request in a single page.
    +	// The remaining results can be seen by sending another request with the returned
    +	// nextToken value. This value can be between 5 and 500; if maxResults is given
    +	// a larger value than 500, you will receive an error.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The token to use to retrieve the next page of results.
    +	NextToken *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeHostReservationsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeHostReservationsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFilter sets the Filter field's value; it returns the receiver for chaining.
    +func (s *DescribeHostReservationsInput) SetFilter(v []*Filter) *DescribeHostReservationsInput {
    +	s.Filter = v
    +	return s
    +}
    +
    +// SetHostReservationIdSet sets the HostReservationIdSet field's value.
    +func (s *DescribeHostReservationsInput) SetHostReservationIdSet(v []*string) *DescribeHostReservationsInput {
    +	s.HostReservationIdSet = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeHostReservationsInput) SetMaxResults(v int64) *DescribeHostReservationsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeHostReservationsInput) SetNextToken(v string) *DescribeHostReservationsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +type DescribeHostReservationsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Details about the reservation's configuration.
    +	HostReservationSet []*HostReservation `locationName:"hostReservationSet" type:"list"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeHostReservationsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeHostReservationsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetHostReservationSet sets the HostReservationSet field's value; it returns the receiver for chaining.
    +func (s *DescribeHostReservationsOutput) SetHostReservationSet(v []*HostReservation) *DescribeHostReservationsOutput {
    +	s.HostReservationSet = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeHostReservationsOutput) SetNextToken(v string) *DescribeHostReservationsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeHosts.
    +type DescribeHostsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more filters.
    +	//
    +	//    * instance-type - The instance type size that the Dedicated Host is configured
    +	//    to support.
    +	//
    +	//    * auto-placement - Whether auto-placement is enabled or disabled (on |
    +	//    off).
    +	//
    +	//    * host-reservation-id - The ID of the reservation assigned to this host.
    +	//
    +	//    * client-token - The idempotency token you provided when you launched
    +	//    the instance
    +	//
    +	//    * state - The allocation state of the Dedicated Host (available | under-assessment
    +	//    | permanent-failure | released | released-permanent-failure).
    +	//
    +	//    * availability-zone - The Availability Zone of the host.
    +	Filter []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"`
    +
    +	// The IDs of the Dedicated Hosts. The IDs are used for targeted instance launches.
    +	HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list"`
    +
    +	// The maximum number of results to return for the request in a single page.
    +	// The remaining results can be seen by sending another request with the returned
    +	// nextToken value. This value can be between 5 and 500; if maxResults is given
    +	// a larger value than 500, you will receive an error. You cannot specify this
    +	// parameter and the host IDs parameter in the same request.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeHostsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeHostsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFilter sets the Filter field's value; it returns the receiver for chaining.
    +func (s *DescribeHostsInput) SetFilter(v []*Filter) *DescribeHostsInput {
    +	s.Filter = v
    +	return s
    +}
    +
    +// SetHostIds sets the HostIds field's value.
    +func (s *DescribeHostsInput) SetHostIds(v []*string) *DescribeHostsInput {
    +	s.HostIds = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeHostsInput) SetMaxResults(v int64) *DescribeHostsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeHostsInput) SetNextToken(v string) *DescribeHostsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeHosts.
    +type DescribeHostsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the Dedicated Hosts.
    +	Hosts []*Host `locationName:"hostSet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeHostsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeHostsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetHosts sets the Hosts field's value.
    +func (s *DescribeHostsOutput) SetHosts(v []*Host) *DescribeHostsOutput {
    +	s.Hosts = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeHostsOutput) SetNextToken(v string) *DescribeHostsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeIdFormat.
    +type DescribeIdFormatInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The type of resource: instance | reservation | snapshot | volume
    +	Resource *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeIdFormatInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeIdFormatInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetResource sets the Resource field's value.
    +func (s *DescribeIdFormatInput) SetResource(v string) *DescribeIdFormatInput {
    +	s.Resource = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeIdFormat.
    +type DescribeIdFormatOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the ID format for the resource.
    +	Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeIdFormatOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeIdFormatOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetStatuses sets the Statuses field's value.
    +func (s *DescribeIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeIdFormatOutput {
    +	s.Statuses = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeIdentityIdFormat.
    +type DescribeIdentityIdFormatInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ARN of the principal, which can be an IAM role, IAM user, or the root
    +	// user.
    +	//
    +	// PrincipalArn is a required field
    +	PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"`
    +
    +	// The type of resource: instance | reservation | snapshot | volume
    +	Resource *string `locationName:"resource" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeIdentityIdFormatInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeIdentityIdFormatInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeIdentityIdFormatInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityIdFormatInput"}
    +	if s.PrincipalArn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetPrincipalArn sets the PrincipalArn field's value.
    +func (s *DescribeIdentityIdFormatInput) SetPrincipalArn(v string) *DescribeIdentityIdFormatInput {
    +	s.PrincipalArn = &v
    +	return s
    +}
    +
    +// SetResource sets the Resource field's value.
    +func (s *DescribeIdentityIdFormatInput) SetResource(v string) *DescribeIdentityIdFormatInput {
    +	s.Resource = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeIdentityIdFormat.
    +type DescribeIdentityIdFormatOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the ID format for the resources.
    +	Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeIdentityIdFormatOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeIdentityIdFormatOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetStatuses sets the Statuses field's value.
    +func (s *DescribeIdentityIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeIdentityIdFormatOutput {
    +	s.Statuses = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeImageAttribute.
    +type DescribeImageAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The AMI attribute.
    +	//
    +	// Note: Depending on your account privileges, the blockDeviceMapping attribute
    +	// may return a Client.AuthFailure error. If this happens, use DescribeImages
    +	// to get information about the block device mapping for the AMI.
    +	//
    +	// Attribute is a required field
    +	Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the AMI.
    +	//
    +	// ImageId is a required field
    +	ImageId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImageAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImageAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeImageAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeImageAttributeInput"}
    +	if s.Attribute == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Attribute"))
    +	}
    +	if s.ImageId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ImageId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *DescribeImageAttributeInput) SetAttribute(v string) *DescribeImageAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeImageAttributeInput) SetDryRun(v bool) *DescribeImageAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *DescribeImageAttributeInput) SetImageId(v string) *DescribeImageAttributeInput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// Describes an image attribute.
    +type DescribeImageAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more block device mapping entries.
    +	BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
    +
    +	// A description for the AMI.
    +	Description *AttributeValue `locationName:"description" type:"structure"`
    +
    +	// The ID of the AMI.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +
    +	// The kernel ID.
    +	KernelId *AttributeValue `locationName:"kernel" type:"structure"`
    +
    +	// One or more launch permissions.
    +	LaunchPermissions []*LaunchPermission `locationName:"launchPermission" locationNameList:"item" type:"list"`
    +
    +	// One or more product codes.
    +	ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
    +
    +	// The RAM disk ID.
    +	RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"`
    +
    +	// Indicates whether enhanced networking with the Intel 82599 Virtual Function
    +	// interface is enabled.
    +	SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImageAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImageAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *DescribeImageAttributeOutput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *DescribeImageAttributeOutput {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *DescribeImageAttributeOutput) SetDescription(v *AttributeValue) *DescribeImageAttributeOutput {
    +	s.Description = v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *DescribeImageAttributeOutput) SetImageId(v string) *DescribeImageAttributeOutput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *DescribeImageAttributeOutput) SetKernelId(v *AttributeValue) *DescribeImageAttributeOutput {
    +	s.KernelId = v
    +	return s
    +}
    +
    +// SetLaunchPermissions sets the LaunchPermissions field's value.
    +func (s *DescribeImageAttributeOutput) SetLaunchPermissions(v []*LaunchPermission) *DescribeImageAttributeOutput {
    +	s.LaunchPermissions = v
    +	return s
    +}
    +
    +// SetProductCodes sets the ProductCodes field's value.
    +func (s *DescribeImageAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeImageAttributeOutput {
    +	s.ProductCodes = v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *DescribeImageAttributeOutput) SetRamdiskId(v *AttributeValue) *DescribeImageAttributeOutput {
    +	s.RamdiskId = v
    +	return s
    +}
    +
    +// SetSriovNetSupport sets the SriovNetSupport field's value.
    +func (s *DescribeImageAttributeOutput) SetSriovNetSupport(v *AttributeValue) *DescribeImageAttributeOutput {
    +	s.SriovNetSupport = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeImages.
    +type DescribeImagesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Scopes the images by users with explicit launch permissions. Specify an AWS
    +	// account ID, self (the sender of the request), or all (public AMIs).
    +	ExecutableUsers []*string `locationName:"ExecutableBy" locationNameList:"ExecutableBy" type:"list"`
    +
    +	// One or more filters.
    +	//
    +	//    * architecture - The image architecture (i386 | x86_64).
    +	//
    +	//    * block-device-mapping.delete-on-termination - A Boolean value that indicates
    +	//    whether the Amazon EBS volume is deleted on instance termination.
    +	//
    +	//    * block-device-mapping.device-name - The device name for the EBS volume
    +	//    (for example, /dev/sdh).
    +	//
    +	//    * block-device-mapping.snapshot-id - The ID of the snapshot used for the
    +	//    EBS volume.
    +	//
    +	//    * block-device-mapping.volume-size - The volume size of the EBS volume,
    +	//    in GiB.
    +	//
    +	//    * block-device-mapping.volume-type - The volume type of the EBS volume
    +	//    (gp2 | io1 | st1 | sc1 | standard).
    +	//
    +	//    * description - The description of the image (provided during image creation).
    +	//
    +	//    * hypervisor - The hypervisor type (ovm | xen).
    +	//
    +	//    * image-id - The ID of the image.
    +	//
    +	//    * image-type - The image type (machine | kernel | ramdisk).
    +	//
    +	//    * is-public - A Boolean that indicates whether the image is public.
    +	//
    +	//    * kernel-id - The kernel ID.
    +	//
    +	//    * manifest-location - The location of the image manifest.
    +	//
    +	//    * name - The name of the AMI (provided during image creation).
    +	//
    +	//    * owner-alias - String value from an Amazon-maintained list (amazon |
    +	//    aws-marketplace | microsoft) of snapshot owners. Not to be confused with
    +	//    the user-configured AWS account alias, which is set from the IAM console.
    +	//
    +	//    * owner-id - The AWS account ID of the image owner.
    +	//
    +	//    * platform - The platform. To only list Windows-based AMIs, use windows.
    +	//
    +	//    * product-code - The product code.
    +	//
    +	//    * product-code.type - The type of the product code (devpay | marketplace).
    +	//
    +	//    * ramdisk-id - The RAM disk ID.
    +	//
    +	//    * root-device-name - The name of the root device volume (for example,
    +	//    /dev/sda1).
    +	//
    +	//    * root-device-type - The type of the root device volume (ebs | instance-store).
    +	//
    +	//    * state - The state of the image (available | pending | failed).
    +	//
    +	//    * state-reason-code - The reason code for the state change.
    +	//
    +	//    * state-reason-message - The message for the state change.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * virtualization-type - The virtualization type (paravirtual | hvm).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more image IDs.
    +	//
    +	// Default: Describes all images available to you.
    +	ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"`
    +
    +	// Filters the images by the owner. Specify an AWS account ID, self (owner is
    +	// the sender of the request), or an AWS owner alias (valid values are amazon
    +	// | aws-marketplace | microsoft). Omitting this option returns all images for
    +	// which you have launch permissions, regardless of ownership.
    +	Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImagesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImagesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeImagesInput) SetDryRun(v bool) *DescribeImagesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetExecutableUsers sets the ExecutableUsers field's value.
    +func (s *DescribeImagesInput) SetExecutableUsers(v []*string) *DescribeImagesInput {
    +	s.ExecutableUsers = v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeImagesInput) SetFilters(v []*Filter) *DescribeImagesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetImageIds sets the ImageIds field's value.
    +func (s *DescribeImagesInput) SetImageIds(v []*string) *DescribeImagesInput {
    +	s.ImageIds = v
    +	return s
    +}
    +
    +// SetOwners sets the Owners field's value.
    +func (s *DescribeImagesInput) SetOwners(v []*string) *DescribeImagesInput {
    +	s.Owners = v
    +	return s
    +}
    +
    +// Contains the output of DescribeImages.
    +type DescribeImagesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more images.
    +	Images []*Image `locationName:"imagesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImagesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImagesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImages sets the Images field's value.
    +func (s *DescribeImagesOutput) SetImages(v []*Image) *DescribeImagesOutput {
    +	s.Images = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeImportImageTasks.
    +type DescribeImportImageTasksInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// Filter tasks using the task-state filter and one of the following values:
    +	// active, completed, deleting, deleted.
    +	Filters []*Filter `locationNameList:"Filter" type:"list"`
    +
    +	// A list of import image task IDs.
    +	ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"`
    +
    +	// The maximum number of results to return in a single call. To retrieve the
    +	// remaining results, make another call with the returned NextToken value.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// A token that indicates the next page of results.
    +	NextToken *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImportImageTasksInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImportImageTasksInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeImportImageTasksInput) SetDryRun(v bool) *DescribeImportImageTasksInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeImportImageTasksInput) SetFilters(v []*Filter) *DescribeImportImageTasksInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetImportTaskIds sets the ImportTaskIds field's value.
    +func (s *DescribeImportImageTasksInput) SetImportTaskIds(v []*string) *DescribeImportImageTasksInput {
    +	s.ImportTaskIds = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeImportImageTasksInput) SetMaxResults(v int64) *DescribeImportImageTasksInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeImportImageTasksInput) SetNextToken(v string) *DescribeImportImageTasksInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output for DescribeImportImageTasks.
    +type DescribeImportImageTasksOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A list of zero or more import image tasks that are currently active or were
    +	// completed or canceled in the previous 7 days.
    +	ImportImageTasks []*ImportImageTask `locationName:"importImageTaskSet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to get the next page of results. This value is null when
    +	// there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImportImageTasksOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImportImageTasksOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImportImageTasks sets the ImportImageTasks field's value.
    +func (s *DescribeImportImageTasksOutput) SetImportImageTasks(v []*ImportImageTask) *DescribeImportImageTasksOutput {
    +	s.ImportImageTasks = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeImportImageTasksOutput) SetNextToken(v string) *DescribeImportImageTasksOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeImportSnapshotTasks.
    +type DescribeImportSnapshotTasksInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// One or more filters.
    +	Filters []*Filter `locationNameList:"Filter" type:"list"`
    +
    +	// A list of import snapshot task IDs.
    +	ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"`
    +
    +	// The maximum number of results to return in a single call. To retrieve the
    +	// remaining results, make another call with the returned NextToken value.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// A token that indicates the next page of results.
    +	NextToken *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImportSnapshotTasksInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImportSnapshotTasksInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeImportSnapshotTasksInput) SetDryRun(v bool) *DescribeImportSnapshotTasksInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeImportSnapshotTasksInput) SetFilters(v []*Filter) *DescribeImportSnapshotTasksInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetImportTaskIds sets the ImportTaskIds field's value.
    +func (s *DescribeImportSnapshotTasksInput) SetImportTaskIds(v []*string) *DescribeImportSnapshotTasksInput {
    +	s.ImportTaskIds = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeImportSnapshotTasksInput) SetMaxResults(v int64) *DescribeImportSnapshotTasksInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeImportSnapshotTasksInput) SetNextToken(v string) *DescribeImportSnapshotTasksInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output for DescribeImportSnapshotTasks.
    +type DescribeImportSnapshotTasksOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A list of zero or more import snapshot tasks that are currently active or
    +	// were completed or canceled in the previous 7 days.
    +	ImportSnapshotTasks []*ImportSnapshotTask `locationName:"importSnapshotTaskSet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to get the next page of results. This value is null when
    +	// there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeImportSnapshotTasksOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeImportSnapshotTasksOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImportSnapshotTasks sets the ImportSnapshotTasks field's value.
    +func (s *DescribeImportSnapshotTasksOutput) SetImportSnapshotTasks(v []*ImportSnapshotTask) *DescribeImportSnapshotTasksOutput {
    +	s.ImportSnapshotTasks = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeImportSnapshotTasksOutput) SetNextToken(v string) *DescribeImportSnapshotTasksOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeInstanceAttribute.
    +type DescribeInstanceAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The instance attribute.
    +	//
    +	// Note: The enaSupport attribute is not supported at this time.
    +	//
    +	// Attribute is a required field
    +	Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInstanceAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInstanceAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeInstanceAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceAttributeInput"}
    +	if s.Attribute == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Attribute"))
    +	}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *DescribeInstanceAttributeInput) SetAttribute(v string) *DescribeInstanceAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeInstanceAttributeInput) SetDryRun(v bool) *DescribeInstanceAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *DescribeInstanceAttributeInput) SetInstanceId(v string) *DescribeInstanceAttributeInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// Describes an instance attribute.
    +//
    +// NOTE(review): appears to be vendored, machine-generated aws-sdk-go model
    +// code (the struct tags drive the EC2 query wire format) — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeInstanceAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The block device mapping of the instance.
    +	BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
    +
    +	// If the value is true, you can't terminate the instance through the Amazon
    +	// EC2 console, CLI, or API; otherwise, you can.
    +	DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"`
    +
    +	// Indicates whether the instance is optimized for EBS I/O.
    +	EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"`
    +
    +	// Indicates whether enhanced networking with ENA is enabled.
    +	EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"`
    +
    +	// The security groups associated with the instance.
    +	Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// Indicates whether an instance stops or terminates when you initiate shutdown
    +	// from the instance (using the operating system command for system shutdown).
    +	InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"`
    +
    +	// The instance type.
    +	InstanceType *AttributeValue `locationName:"instanceType" type:"structure"`
    +
    +	// The kernel ID.
    +	KernelId *AttributeValue `locationName:"kernel" type:"structure"`
    +
    +	// A list of product codes.
    +	ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
    +
    +	// The RAM disk ID.
    +	RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"`
    +
    +	// The name of the root device (for example, /dev/sda1 or /dev/xvda).
    +	RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"`
    +
    +	// Indicates whether source/destination checking is enabled. A value of true
    +	// means checking is enabled, and false means checking is disabled. This value
    +	// must be false for a NAT instance to perform NAT.
    +	SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"`
    +
    +	// Indicates whether enhanced networking with the Intel 82599 Virtual Function
    +	// interface is enabled.
    +	SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"`
    +
    +	// The user data.
    +	UserData *AttributeValue `locationName:"userData" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInstanceAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInstanceAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *DescribeInstanceAttributeOutput) SetBlockDeviceMappings(v []*InstanceBlockDeviceMapping) *DescribeInstanceAttributeOutput {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetDisableApiTermination sets the DisableApiTermination field's value.
    +func (s *DescribeInstanceAttributeOutput) SetDisableApiTermination(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput {
    +	s.DisableApiTermination = v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *DescribeInstanceAttributeOutput) SetEbsOptimized(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput {
    +	s.EbsOptimized = v
    +	return s
    +}
    +
    +// SetEnaSupport sets the EnaSupport field's value.
    +func (s *DescribeInstanceAttributeOutput) SetEnaSupport(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput {
    +	s.EnaSupport = v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *DescribeInstanceAttributeOutput) SetGroups(v []*GroupIdentifier) *DescribeInstanceAttributeOutput {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +// The field stores the address of the parameter copy (v is passed by value).
    +func (s *DescribeInstanceAttributeOutput) SetInstanceId(v string) *DescribeInstanceAttributeOutput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value.
    +func (s *DescribeInstanceAttributeOutput) SetInstanceInitiatedShutdownBehavior(v *AttributeValue) *DescribeInstanceAttributeOutput {
    +	s.InstanceInitiatedShutdownBehavior = v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *DescribeInstanceAttributeOutput) SetInstanceType(v *AttributeValue) *DescribeInstanceAttributeOutput {
    +	s.InstanceType = v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *DescribeInstanceAttributeOutput) SetKernelId(v *AttributeValue) *DescribeInstanceAttributeOutput {
    +	s.KernelId = v
    +	return s
    +}
    +
    +// SetProductCodes sets the ProductCodes field's value.
    +func (s *DescribeInstanceAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeInstanceAttributeOutput {
    +	s.ProductCodes = v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *DescribeInstanceAttributeOutput) SetRamdiskId(v *AttributeValue) *DescribeInstanceAttributeOutput {
    +	s.RamdiskId = v
    +	return s
    +}
    +
    +// SetRootDeviceName sets the RootDeviceName field's value.
    +func (s *DescribeInstanceAttributeOutput) SetRootDeviceName(v *AttributeValue) *DescribeInstanceAttributeOutput {
    +	s.RootDeviceName = v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *DescribeInstanceAttributeOutput) SetSourceDestCheck(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput {
    +	s.SourceDestCheck = v
    +	return s
    +}
    +
    +// SetSriovNetSupport sets the SriovNetSupport field's value.
    +func (s *DescribeInstanceAttributeOutput) SetSriovNetSupport(v *AttributeValue) *DescribeInstanceAttributeOutput {
    +	s.SriovNetSupport = v
    +	return s
    +}
    +
    +// SetUserData sets the UserData field's value.
    +func (s *DescribeInstanceAttributeOutput) SetUserData(v *AttributeValue) *DescribeInstanceAttributeOutput {
    +	s.UserData = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeInstanceStatus.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeInstanceStatusInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * availability-zone - The Availability Zone of the instance.
    +	//
    +	//    * event.code - The code for the scheduled event (instance-reboot | system-reboot
    +	//    | system-maintenance | instance-retirement | instance-stop).
    +	//
    +	//    * event.description - A description of the event.
    +	//
    +	//    * event.not-after - The latest end time for the scheduled event (for example,
    +	//    2014-09-15T17:15:20.000Z).
    +	//
    +	//    * event.not-before - The earliest start time for the scheduled event (for
    +	//    example, 2014-09-15T17:15:20.000Z).
    +	//
    +	//    * instance-state-code - The code for the instance state, as a 16-bit unsigned
    +	//    integer. The high byte is an opaque internal value and should be ignored.
    +	//    The low byte is set based on the state represented. The valid values are
    +	//    0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping),
    +	//    and 80 (stopped).
    +	//
    +	//    * instance-state-name - The state of the instance (pending | running |
    +	//    shutting-down | terminated | stopping | stopped).
    +	//
    +	//    * instance-status.reachability - Filters on instance status where the
    +	//    name is reachability (passed | failed | initializing | insufficient-data).
    +	//
    +	//    * instance-status.status - The status of the instance (ok | impaired |
    +	//    initializing | insufficient-data | not-applicable).
    +	//
    +	//    * system-status.reachability - Filters on system status where the name
    +	//    is reachability (passed | failed | initializing | insufficient-data).
    +	//
    +	//    * system-status.status - The system status of the instance (ok | impaired
    +	//    | initializing | insufficient-data | not-applicable).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// When true, includes the health status for all instances. When false, includes
    +	// the health status for running instances only.
    +	//
    +	// Default: false
    +	IncludeAllInstances *bool `locationName:"includeAllInstances" type:"boolean"`
    +
    +	// One or more instance IDs.
    +	//
    +	// Default: Describes all your instances.
    +	//
    +	// Constraints: Maximum 100 explicitly specified instance IDs.
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"`
    +
    +	// The maximum number of results to return in a single call. To retrieve the
    +	// remaining results, make another call with the returned NextToken value. This
    +	// value can be between 5 and 1000. You cannot specify this parameter and the
    +	// instance IDs parameter in the same call.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInstanceStatusInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInstanceStatusInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +// The field stores the address of the parameter copy (v is passed by value).
    +func (s *DescribeInstanceStatusInput) SetDryRun(v bool) *DescribeInstanceStatusInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeInstanceStatusInput) SetFilters(v []*Filter) *DescribeInstanceStatusInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetIncludeAllInstances sets the IncludeAllInstances field's value.
    +func (s *DescribeInstanceStatusInput) SetIncludeAllInstances(v bool) *DescribeInstanceStatusInput {
    +	s.IncludeAllInstances = &v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *DescribeInstanceStatusInput) SetInstanceIds(v []*string) *DescribeInstanceStatusInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeInstanceStatusInput) SetMaxResults(v int64) *DescribeInstanceStatusInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeInstanceStatusInput) SetNextToken(v string) *DescribeInstanceStatusInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeInstanceStatus.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeInstanceStatusOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more instance status descriptions.
    +	InstanceStatuses []*InstanceStatus `locationName:"instanceStatusSet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInstanceStatusOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInstanceStatusOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceStatuses sets the InstanceStatuses field's value.
    +func (s *DescribeInstanceStatusOutput) SetInstanceStatuses(v []*InstanceStatus) *DescribeInstanceStatusOutput {
    +	s.InstanceStatuses = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +// The field stores the address of the parameter copy (v is passed by value).
    +func (s *DescribeInstanceStatusOutput) SetNextToken(v string) *DescribeInstanceStatusOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeInstances.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output. Comment-only fix below:
    +// balanced the parenthesis in the network-interface.status filter entry.
    +type DescribeInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * affinity - The affinity setting for an instance running on a Dedicated
    +	//    Host (default | host).
    +	//
    +	//    * architecture - The instance architecture (i386 | x86_64).
    +	//
    +	//    * availability-zone - The Availability Zone of the instance.
    +	//
    +	//    * block-device-mapping.attach-time - The attach time for an EBS volume
    +	//    mapped to the instance, for example, 2010-09-15T17:15:20.000Z.
    +	//
    +	//    * block-device-mapping.delete-on-termination - A Boolean that indicates
    +	//    whether the EBS volume is deleted on instance termination.
    +	//
    +	//    * block-device-mapping.device-name - The device name for the EBS volume
    +	//    (for example, /dev/sdh or xvdh).
    +	//
    +	//    * block-device-mapping.status - The status for the EBS volume (attaching
    +	//    | attached | detaching | detached).
    +	//
    +	//    * block-device-mapping.volume-id - The volume ID of the EBS volume.
    +	//
    +	//    * client-token - The idempotency token you provided when you launched
    +	//    the instance.
    +	//
    +	//    * dns-name - The public DNS name of the instance.
    +	//
    +	//    * group-id - The ID of the security group for the instance. EC2-Classic
    +	//    only.
    +	//
    +	//    * group-name - The name of the security group for the instance. EC2-Classic
    +	//    only.
    +	//
    +	//    * host-id - The ID of the Dedicated Host on which the instance is running,
    +	//    if applicable.
    +	//
    +	//    * hypervisor - The hypervisor type of the instance (ovm | xen).
    +	//
    +	//    * iam-instance-profile.arn - The instance profile associated with the
    +	//    instance. Specified as an ARN.
    +	//
    +	//    * image-id - The ID of the image used to launch the instance.
    +	//
    +	//    * instance-id - The ID of the instance.
    +	//
    +	//    * instance-lifecycle - Indicates whether this is a Spot Instance or a
    +	//    Scheduled Instance (spot | scheduled).
    +	//
    +	//    * instance-state-code - The state of the instance, as a 16-bit unsigned
    +	//    integer. The high byte is an opaque internal value and should be ignored.
    +	//    The low byte is set based on the state represented. The valid values are:
    +	//    0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping),
    +	//    and 80 (stopped).
    +	//
    +	//    * instance-state-name - The state of the instance (pending | running |
    +	//    shutting-down | terminated | stopping | stopped).
    +	//
    +	//    * instance-type - The type of instance (for example, t2.micro).
    +	//
    +	//    * instance.group-id - The ID of the security group for the instance.
    +	//
    +	//    * instance.group-name - The name of the security group for the instance.
    +	//
    +	//
    +	//    * ip-address - The public IP address of the instance.
    +	//
    +	//    * kernel-id - The kernel ID.
    +	//
    +	//    * key-name - The name of the key pair used when the instance was launched.
    +	//
    +	//    * launch-index - When launching multiple instances, this is the index
    +	//    for the instance in the launch group (for example, 0, 1, 2, and so on).
    +	//
    +	//
    +	//    * launch-time - The time when the instance was launched.
    +	//
    +	//    * monitoring-state - Indicates whether monitoring is enabled for the instance
    +	//    (disabled | enabled).
    +	//
    +	//    * owner-id - The AWS account ID of the instance owner.
    +	//
    +	//    * placement-group-name - The name of the placement group for the instance.
    +	//
    +	//    * platform - The platform. Use windows if you have Windows instances;
    +	//    otherwise, leave blank.
    +	//
    +	//    * private-dns-name - The private DNS name of the instance.
    +	//
    +	//    * private-ip-address - The private IP address of the instance.
    +	//
    +	//    * product-code - The product code associated with the AMI used to launch
    +	//    the instance.
    +	//
    +	//    * product-code.type - The type of product code (devpay | marketplace).
    +	//
    +	//    * ramdisk-id - The RAM disk ID.
    +	//
    +	//    * reason - The reason for the current state of the instance (for example,
    +	//    shows "User Initiated [date]" when you stop or terminate the instance).
    +	//    Similar to the state-reason-code filter.
    +	//
    +	//    * requester-id - The ID of the entity that launched the instance on your
    +	//    behalf (for example, AWS Management Console, Auto Scaling, and so on).
    +	//
    +	//    * reservation-id - The ID of the instance's reservation. A reservation
    +	//    ID is created any time you launch an instance. A reservation ID has a
    +	//    one-to-one relationship with an instance launch request, but can be associated
    +	//    with more than one instance if you launch multiple instances using the
    +	//    same launch request. For example, if you launch one instance, you'll get
    +	//    one reservation ID. If you launch ten instances using the same launch
    +	//    request, you'll also get one reservation ID.
    +	//
    +	//    * root-device-name - The name of the root device for the instance (for
    +	//    example, /dev/sda1 or /dev/xvda).
    +	//
    +	//    * root-device-type - The type of root device that the instance uses (ebs
    +	//    | instance-store).
    +	//
    +	//    * source-dest-check - Indicates whether the instance performs source/destination
    +	//    checking. A value of true means that checking is enabled, and false means
    +	//    checking is disabled. The value must be false for the instance to perform
    +	//    network address translation (NAT) in your VPC.
    +	//
    +	//    * spot-instance-request-id - The ID of the Spot instance request.
    +	//
    +	//    * state-reason-code - The reason code for the state change.
    +	//
    +	//    * state-reason-message - A message that describes the state change.
    +	//
    +	//    * subnet-id - The ID of the subnet for the instance.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource,
    +	//    where tag:key is the tag's key.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * tenancy - The tenancy of an instance (dedicated | default | host).
    +	//
    +	//    * virtualization-type - The virtualization type of the instance (paravirtual
    +	//    | hvm).
    +	//
    +	//    * vpc-id - The ID of the VPC that the instance is running in.
    +	//
    +	//    * network-interface.description - The description of the network interface.
    +	//
    +	//    * network-interface.subnet-id - The ID of the subnet for the network interface.
    +	//
    +	//    * network-interface.vpc-id - The ID of the VPC for the network interface.
    +	//
    +	//    * network-interface.network-interface-id - The ID of the network interface.
    +	//
    +	//    * network-interface.owner-id - The ID of the owner of the network interface.
    +	//
    +	//    * network-interface.availability-zone - The Availability Zone for the
    +	//    network interface.
    +	//
    +	//    * network-interface.requester-id - The requester ID for the network interface.
    +	//
    +	//    * network-interface.requester-managed - Indicates whether the network
    +	//    interface is being managed by AWS.
    +	//
    +	//    * network-interface.status - The status of the network interface (available
    +	//    | in-use).
    +	//
    +	//    * network-interface.mac-address - The MAC address of the network interface.
    +	//
    +	//    * network-interface.private-dns-name - The private DNS name of the network
    +	//    interface.
    +	//
    +	//    * network-interface.source-dest-check - Whether the network interface
    +	//    performs source/destination checking. A value of true means checking is
    +	//    enabled, and false means checking is disabled. The value must be false
    +	//    for the network interface to perform network address translation (NAT)
    +	//    in your VPC.
    +	//
    +	//    * network-interface.group-id - The ID of a security group associated with
    +	//    the network interface.
    +	//
    +	//    * network-interface.group-name - The name of a security group associated
    +	//    with the network interface.
    +	//
    +	//    * network-interface.attachment.attachment-id - The ID of the interface
    +	//    attachment.
    +	//
    +	//    * network-interface.attachment.instance-id - The ID of the instance to
    +	//    which the network interface is attached.
    +	//
    +	//    * network-interface.attachment.instance-owner-id - The owner ID of the
    +	//    instance to which the network interface is attached.
    +	//
    +	//    * network-interface.addresses.private-ip-address - The private IP address
    +	//    associated with the network interface.
    +	//
    +	//    * network-interface.attachment.device-index - The device index to which
    +	//    the network interface is attached.
    +	//
    +	//    * network-interface.attachment.status - The status of the attachment (attaching
    +	//    | attached | detaching | detached).
    +	//
    +	//    * network-interface.attachment.attach-time - The time that the network
    +	//    interface was attached to an instance.
    +	//
    +	//    * network-interface.attachment.delete-on-termination - Specifies whether
    +	//    the attachment is deleted when an instance is terminated.
    +	//
    +	//    * network-interface.addresses.primary - Specifies whether the IP address
    +	//    of the network interface is the primary private IP address.
    +	//
    +	//    * network-interface.addresses.association.public-ip - The ID of the association
    +	//    of an Elastic IP address with a network interface.
    +	//
    +	//    * network-interface.addresses.association.ip-owner-id - The owner ID of
    +	//    the private IP address associated with the network interface.
    +	//
    +	//    * association.public-ip - The address of the Elastic IP address bound
    +	//    to the network interface.
    +	//
    +	//    * association.ip-owner-id - The owner of the Elastic IP address associated
    +	//    with the network interface.
    +	//
    +	//    * association.allocation-id - The allocation ID returned when you allocated
    +	//    the Elastic IP address for your network interface.
    +	//
    +	//    * association.association-id - The association ID returned when the network
    +	//    interface was associated with an IP address.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more instance IDs.
    +	//
    +	// Default: Describes all your instances.
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"`
    +
    +	// The maximum number of results to return in a single call. To retrieve the
    +	// remaining results, make another call with the returned NextToken value. This
    +	// value can be between 5 and 1000. You cannot specify this parameter and the
    +	// instance IDs parameter or tag filters in the same call.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token to request the next page of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +// The field stores the address of the parameter copy (v is passed by value).
    +func (s *DescribeInstancesInput) SetDryRun(v bool) *DescribeInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeInstancesInput) SetFilters(v []*Filter) *DescribeInstancesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *DescribeInstancesInput) SetInstanceIds(v []*string) *DescribeInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeInstancesInput) SetMaxResults(v int64) *DescribeInstancesInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeInstancesInput) SetNextToken(v string) *DescribeInstancesInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeInstances.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Zero or more reservations.
    +	Reservations []*Reservation `locationName:"reservationSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +// The field stores the address of the parameter copy (v is passed by value).
    +func (s *DescribeInstancesOutput) SetNextToken(v string) *DescribeInstancesOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetReservations sets the Reservations field's value.
    +func (s *DescribeInstancesOutput) SetReservations(v []*Reservation) *DescribeInstancesOutput {
    +	s.Reservations = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeInternetGateways.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeInternetGatewaysInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * attachment.state - The current state of the attachment between the gateway
    +	//    and the VPC (available). Present only if a VPC is attached.
    +	//
    +	//    * attachment.vpc-id - The ID of an attached VPC.
    +	//
    +	//    * internet-gateway-id - The ID of the Internet gateway.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more Internet gateway IDs.
    +	//
    +	// Default: Describes all your Internet gateways.
    +	InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInternetGatewaysInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInternetGatewaysInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +// The field stores the address of the parameter copy (v is passed by value).
    +func (s *DescribeInternetGatewaysInput) SetDryRun(v bool) *DescribeInternetGatewaysInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeInternetGatewaysInput) SetFilters(v []*Filter) *DescribeInternetGatewaysInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetInternetGatewayIds sets the InternetGatewayIds field's value.
    +func (s *DescribeInternetGatewaysInput) SetInternetGatewayIds(v []*string) *DescribeInternetGatewaysInput {
    +	s.InternetGatewayIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeInternetGateways.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeInternetGatewaysOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more Internet gateways.
    +	InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeInternetGatewaysOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeInternetGatewaysOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInternetGateways sets the InternetGateways field's value.
    +func (s *DescribeInternetGatewaysOutput) SetInternetGateways(v []*InternetGateway) *DescribeInternetGatewaysOutput {
    +	s.InternetGateways = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeKeyPairs.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeKeyPairsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * fingerprint - The fingerprint of the key pair.
    +	//
    +	//    * key-name - The name of the key pair.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more key pair names.
    +	//
    +	// Default: Describes all your key pairs.
    +	KeyNames []*string `locationName:"KeyName" locationNameList:"KeyName" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeKeyPairsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeKeyPairsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +// The field stores the address of the parameter copy (v is passed by value).
    +func (s *DescribeKeyPairsInput) SetDryRun(v bool) *DescribeKeyPairsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeKeyPairsInput) SetFilters(v []*Filter) *DescribeKeyPairsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetKeyNames sets the KeyNames field's value.
    +func (s *DescribeKeyPairsInput) SetKeyNames(v []*string) *DescribeKeyPairsInput {
    +	s.KeyNames = v
    +	return s
    +}
    +
    +// Contains the output of DescribeKeyPairs.
    +//
    +// NOTE(review): vendored aws-sdk-go generated model code — avoid hand-edits;
    +// TODO confirm against upstream generator output.
    +type DescribeKeyPairsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more key pairs.
    +	KeyPairs []*KeyPairInfo `locationName:"keySet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeKeyPairsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeKeyPairsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetKeyPairs sets the KeyPairs field's value.
    +func (s *DescribeKeyPairsOutput) SetKeyPairs(v []*KeyPairInfo) *DescribeKeyPairsOutput {
    +	s.KeyPairs = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeMovingAddresses.
    +type DescribeMovingAddressesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * moving-status - The status of the Elastic IP address (MovingToVpc |
    +	//    RestoringToClassic).
    +	Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of results to return for the request in a single page.
    +	// The remaining results of the initial request can be seen by sending another
    +	// request with the returned NextToken value. This value can be between 5 and
    +	// 1000; if MaxResults is given a value outside of this range, an error is returned.
    +	//
    +	// Default: If no value is provided, the default is 1000.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token to use to retrieve the next page of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// One or more Elastic IP addresses.
    +	PublicIps []*string `locationName:"publicIp" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeMovingAddressesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeMovingAddressesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeMovingAddressesInput) SetDryRun(v bool) *DescribeMovingAddressesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeMovingAddressesInput) SetFilters(v []*Filter) *DescribeMovingAddressesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeMovingAddressesInput) SetMaxResults(v int64) *DescribeMovingAddressesInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeMovingAddressesInput) SetNextToken(v string) *DescribeMovingAddressesInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetPublicIps sets the PublicIps field's value.
    +func (s *DescribeMovingAddressesInput) SetPublicIps(v []*string) *DescribeMovingAddressesInput {
    +	s.PublicIps = v
    +	return s
    +}
    +
    +// Contains the output of DescribeMovingAddresses.
    +type DescribeMovingAddressesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The status for each Elastic IP address.
    +	MovingAddressStatuses []*MovingAddressStatus `locationName:"movingAddressStatusSet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeMovingAddressesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeMovingAddressesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetMovingAddressStatuses sets the MovingAddressStatuses field's value.
    +func (s *DescribeMovingAddressesOutput) SetMovingAddressStatuses(v []*MovingAddressStatus) *DescribeMovingAddressesOutput {
    +	s.MovingAddressStatuses = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeMovingAddressesOutput) SetNextToken(v string) *DescribeMovingAddressesOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeNatGateways.
    +type DescribeNatGatewaysInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more filters.
    +	//
    +	//    * nat-gateway-id - The ID of the NAT gateway.
    +	//
    +	//    * state - The state of the NAT gateway (pending | failed | available |
    +	//    deleting | deleted).
    +	//
    +	//    * subnet-id - The ID of the subnet in which the NAT gateway resides.
    +	//
    +	//    * vpc-id - The ID of the VPC in which the NAT gateway resides.
    +	Filter []*Filter `locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of items to return for this request. The request returns
    +	// a token that you can specify in a subsequent call to get the next set of
    +	// results.
    +	//
    +	// Constraint: If the value specified is greater than 1000, we return only 1000
    +	// items.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// One or more NAT gateway IDs.
    +	NatGatewayIds []*string `locationName:"NatGatewayId" locationNameList:"item" type:"list"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNatGatewaysInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNatGatewaysInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFilter sets the Filter field's value.
    +func (s *DescribeNatGatewaysInput) SetFilter(v []*Filter) *DescribeNatGatewaysInput {
    +	s.Filter = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeNatGatewaysInput) SetMaxResults(v int64) *DescribeNatGatewaysInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNatGatewayIds sets the NatGatewayIds field's value.
    +func (s *DescribeNatGatewaysInput) SetNatGatewayIds(v []*string) *DescribeNatGatewaysInput {
    +	s.NatGatewayIds = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeNatGatewaysInput) SetNextToken(v string) *DescribeNatGatewaysInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeNatGateways.
    +type DescribeNatGatewaysOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the NAT gateways.
    +	NatGateways []*NatGateway `locationName:"natGatewaySet" locationNameList:"item" type:"list"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNatGatewaysOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNatGatewaysOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNatGateways sets the NatGateways field's value.
    +func (s *DescribeNatGatewaysOutput) SetNatGateways(v []*NatGateway) *DescribeNatGatewaysOutput {
    +	s.NatGateways = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeNatGatewaysOutput) SetNextToken(v string) *DescribeNatGatewaysOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeNetworkAcls.
    +type DescribeNetworkAclsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * association.association-id - The ID of an association ID for the ACL.
    +	//
    +	//    * association.network-acl-id - The ID of the network ACL involved in the
    +	//    association.
    +	//
    +	//    * association.subnet-id - The ID of the subnet involved in the association.
    +	//
    +	//    * default - Indicates whether the ACL is the default network ACL for the
    +	//    VPC.
    +	//
    +	//    * entry.cidr - The CIDR range specified in the entry.
    +	//
    +	//    * entry.egress - Indicates whether the entry applies to egress traffic.
    +	//
    +	//    * entry.icmp.code - The ICMP code specified in the entry, if any.
    +	//
    +	//    * entry.icmp.type - The ICMP type specified in the entry, if any.
    +	//
    +	//    * entry.port-range.from - The start of the port range specified in the
    +	//    entry.
    +	//
    +	//    * entry.port-range.to - The end of the port range specified in the entry.
    +	//
    +	//
    +	//    * entry.protocol - The protocol specified in the entry (tcp | udp | icmp
    +	//    or a protocol number).
    +	//
    +	//    * entry.rule-action - Allows or denies the matching traffic (allow | deny).
    +	//
    +	//    * entry.rule-number - The number of an entry (in other words, rule) in
    +	//    the ACL's set of entries.
    +	//
    +	//    * network-acl-id - The ID of the network ACL.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * vpc-id - The ID of the VPC for the network ACL.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more network ACL IDs.
    +	//
    +	// Default: Describes all your network ACLs.
    +	NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNetworkAclsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNetworkAclsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeNetworkAclsInput) SetDryRun(v bool) *DescribeNetworkAclsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeNetworkAclsInput) SetFilters(v []*Filter) *DescribeNetworkAclsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetNetworkAclIds sets the NetworkAclIds field's value.
    +func (s *DescribeNetworkAclsInput) SetNetworkAclIds(v []*string) *DescribeNetworkAclsInput {
    +	s.NetworkAclIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeNetworkAcls.
    +type DescribeNetworkAclsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more network ACLs.
    +	NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNetworkAclsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNetworkAclsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNetworkAcls sets the NetworkAcls field's value.
    +func (s *DescribeNetworkAclsOutput) SetNetworkAcls(v []*NetworkAcl) *DescribeNetworkAclsOutput {
    +	s.NetworkAcls = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeNetworkInterfaceAttribute.
    +type DescribeNetworkInterfaceAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attribute of the network interface.
    +	Attribute *string `locationName:"attribute" type:"string" enum:"NetworkInterfaceAttribute"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the network interface.
    +	//
    +	// NetworkInterfaceId is a required field
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNetworkInterfaceAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNetworkInterfaceAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeNetworkInterfaceAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkInterfaceAttributeInput"}
    +	if s.NetworkInterfaceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *DescribeNetworkInterfaceAttributeInput) SetAttribute(v string) *DescribeNetworkInterfaceAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeNetworkInterfaceAttributeInput) SetDryRun(v bool) *DescribeNetworkInterfaceAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *DescribeNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) *DescribeNetworkInterfaceAttributeInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeNetworkInterfaceAttribute.
    +type DescribeNetworkInterfaceAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attachment (if any) of the network interface.
    +	Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"`
    +
    +	// The description of the network interface.
    +	Description *AttributeValue `locationName:"description" type:"structure"`
    +
    +	// The security groups associated with the network interface.
    +	Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the network interface.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// Indicates whether source/destination checking is enabled.
    +	SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNetworkInterfaceAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNetworkInterfaceAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachment sets the Attachment field's value.
    +func (s *DescribeNetworkInterfaceAttributeOutput) SetAttachment(v *NetworkInterfaceAttachment) *DescribeNetworkInterfaceAttributeOutput {
    +	s.Attachment = v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *DescribeNetworkInterfaceAttributeOutput) SetDescription(v *AttributeValue) *DescribeNetworkInterfaceAttributeOutput {
    +	s.Description = v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *DescribeNetworkInterfaceAttributeOutput) SetGroups(v []*GroupIdentifier) *DescribeNetworkInterfaceAttributeOutput {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *DescribeNetworkInterfaceAttributeOutput) SetNetworkInterfaceId(v string) *DescribeNetworkInterfaceAttributeOutput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *DescribeNetworkInterfaceAttributeOutput) SetSourceDestCheck(v *AttributeBooleanValue) *DescribeNetworkInterfaceAttributeOutput {
    +	s.SourceDestCheck = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeNetworkInterfaces.
    +type DescribeNetworkInterfacesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * addresses.private-ip-address - The private IP addresses associated with
    +	//    the network interface.
    +	//
    +	//    * addresses.primary - Whether the private IP address is the primary IP
    +	//    address associated with the network interface.
    +	//
    +	//    * addresses.association.public-ip - The association ID returned when the
    +	//    network interface was associated with the Elastic IP address.
    +	//
    +	//    * addresses.association.owner-id - The owner ID of the addresses associated
    +	//    with the network interface.
    +	//
    +	//    * association.association-id - The association ID returned when the network
    +	//    interface was associated with an IP address.
    +	//
    +	//    * association.allocation-id - The allocation ID returned when you allocated
    +	//    the Elastic IP address for your network interface.
    +	//
    +	//    * association.ip-owner-id - The owner of the Elastic IP address associated
    +	//    with the network interface.
    +	//
    +	//    * association.public-ip - The address of the Elastic IP address bound
    +	//    to the network interface.
    +	//
    +	//    * association.public-dns-name - The public DNS name for the network interface.
    +	//
    +	//    * attachment.attachment-id - The ID of the interface attachment.
    +	//
    +	//    * attachment.attach.time - The time that the network interface was attached
    +	//    to an instance.
    +	//
    +	//    * attachment.delete-on-termination - Indicates whether the attachment
    +	//    is deleted when an instance is terminated.
    +	//
    +	//    * attachment.device-index - The device index to which the network interface
    +	//    is attached.
    +	//
    +	//    * attachment.instance-id - The ID of the instance to which the network
    +	//    interface is attached.
    +	//
    +	//    * attachment.instance-owner-id - The owner ID of the instance to which
    +	//    the network interface is attached.
    +	//
    +	//    * attachment.nat-gateway-id - The ID of the NAT gateway to which the network
    +	//    interface is attached.
    +	//
    +	//    * attachment.status - The status of the attachment (attaching | attached
    +	//    | detaching | detached).
    +	//
    +	//    * availability-zone - The Availability Zone of the network interface.
    +	//
    +	//    * description - The description of the network interface.
    +	//
    +	//    * group-id - The ID of a security group associated with the network interface.
    +	//
    +	//    * group-name - The name of a security group associated with the network
    +	//    interface.
    +	//
    +	//    * mac-address - The MAC address of the network interface.
    +	//
    +	//    * network-interface-id - The ID of the network interface.
    +	//
    +	//    * owner-id - The AWS account ID of the network interface owner.
    +	//
    +	//    * private-ip-address - The private IP address or addresses of the network
    +	//    interface.
    +	//
    +	//    * private-dns-name - The private DNS name of the network interface.
    +	//
    +	//    * requester-id - The ID of the entity that launched the instance on your
    +	//    behalf (for example, AWS Management Console, Auto Scaling, and so on).
    +	//
    +	//    * requester-managed - Indicates whether the network interface is being
    +	//    managed by an AWS service (for example, AWS Management Console, Auto Scaling,
    +	//    and so on).
    +	//
    +	//    * source-desk-check - Indicates whether the network interface performs
    +	//    source/destination checking. A value of true means checking is enabled,
    +	//    and false means checking is disabled. The value must be false for the
    +	//    network interface to perform network address translation (NAT) in your
    +	//    VPC.
    +	//
    +	//    * status - The status of the network interface. If the network interface
    +	//    is not attached to an instance, the status is available; if a network
    +	//    interface is attached to an instance the status is in-use.
    +	//
    +	//    * subnet-id - The ID of the subnet for the network interface.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * vpc-id - The ID of the VPC for the network interface.
    +	Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more network interface IDs.
    +	//
    +	// Default: Describes all your network interfaces.
    +	NetworkInterfaceIds []*string `locationName:"NetworkInterfaceId" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNetworkInterfacesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNetworkInterfacesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeNetworkInterfacesInput) SetDryRun(v bool) *DescribeNetworkInterfacesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeNetworkInterfacesInput) SetFilters(v []*Filter) *DescribeNetworkInterfacesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value.
    +func (s *DescribeNetworkInterfacesInput) SetNetworkInterfaceIds(v []*string) *DescribeNetworkInterfacesInput {
    +	s.NetworkInterfaceIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeNetworkInterfaces.
    +type DescribeNetworkInterfacesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more network interfaces.
    +	NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeNetworkInterfacesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeNetworkInterfacesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNetworkInterfaces sets the NetworkInterfaces field's value.
    +func (s *DescribeNetworkInterfacesOutput) SetNetworkInterfaces(v []*NetworkInterface) *DescribeNetworkInterfacesOutput {
    +	s.NetworkInterfaces = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribePlacementGroups.
    +type DescribePlacementGroupsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * group-name - The name of the placement group.
    +	//
    +	//    * state - The state of the placement group (pending | available | deleting
    +	//    | deleted).
    +	//
    +	//    * strategy - The strategy of the placement group (cluster).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more placement group names.
    +	//
    +	// Default: Describes all your placement groups, or only those otherwise specified.
    +	GroupNames []*string `locationName:"groupName" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribePlacementGroupsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribePlacementGroupsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribePlacementGroupsInput) SetDryRun(v bool) *DescribePlacementGroupsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribePlacementGroupsInput) SetFilters(v []*Filter) *DescribePlacementGroupsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetGroupNames sets the GroupNames field's value.
    +func (s *DescribePlacementGroupsInput) SetGroupNames(v []*string) *DescribePlacementGroupsInput {
    +	s.GroupNames = v
    +	return s
    +}
    +
    +// Contains the output of DescribePlacementGroups.
    +type DescribePlacementGroupsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more placement groups.
    +	PlacementGroups []*PlacementGroup `locationName:"placementGroupSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribePlacementGroupsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribePlacementGroupsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetPlacementGroups sets the PlacementGroups field's value.
    +func (s *DescribePlacementGroupsOutput) SetPlacementGroups(v []*PlacementGroup) *DescribePlacementGroupsOutput {
    +	s.PlacementGroups = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribePrefixLists.
    +type DescribePrefixListsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * prefix-list-id: The ID of a prefix list.
    +	//
    +	//    * prefix-list-name: The name of a prefix list.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of items to return for this request. The request returns
    +	// a token that you can specify in a subsequent call to get the next set of
    +	// results.
    +	//
    +	// Constraint: If the value specified is greater than 1000, we return only 1000
    +	// items.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The token for the next set of items to return. (You received this token from
    +	// a prior call.)
    +	NextToken *string `type:"string"`
    +
    +	// One or more prefix list IDs.
    +	PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribePrefixListsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribePrefixListsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribePrefixListsInput) SetDryRun(v bool) *DescribePrefixListsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribePrefixListsInput) SetFilters(v []*Filter) *DescribePrefixListsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribePrefixListsInput) SetMaxResults(v int64) *DescribePrefixListsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribePrefixListsInput) SetNextToken(v string) *DescribePrefixListsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetPrefixListIds sets the PrefixListIds field's value.
    +func (s *DescribePrefixListsInput) SetPrefixListIds(v []*string) *DescribePrefixListsInput {
    +	s.PrefixListIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribePrefixLists.
    +type DescribePrefixListsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use when requesting the next set of items. If there are no additional
    +	// items to return, the string is empty.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// All available prefix lists.
    +	PrefixLists []*PrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribePrefixListsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribePrefixListsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value and returns the receiver for chaining.
    +func (s *DescribePrefixListsOutput) SetNextToken(v string) *DescribePrefixListsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetPrefixLists sets the PrefixLists field's value.
    +func (s *DescribePrefixListsOutput) SetPrefixLists(v []*PrefixList) *DescribePrefixListsOutput {
    +	s.PrefixLists = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeRegions.
    +type DescribeRegionsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com).
    +	//
    +	//    * region-name - The name of the region (for example, us-east-1).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The names of one or more regions.
    +	RegionNames []*string `locationName:"RegionName" locationNameList:"RegionName" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeRegionsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeRegionsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeRegionsInput) SetDryRun(v bool) *DescribeRegionsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeRegionsInput) SetFilters(v []*Filter) *DescribeRegionsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetRegionNames sets the RegionNames field's value and returns the receiver for chaining.
    +func (s *DescribeRegionsInput) SetRegionNames(v []*string) *DescribeRegionsInput {
    +	s.RegionNames = v
    +	return s
    +}
    +
    +// Contains the output of DescribeRegions.
    +type DescribeRegionsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more regions.
    +	Regions []*Region `locationName:"regionInfo" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeRegionsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeRegionsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetRegions sets the Regions field's value and returns the receiver for chaining.
    +func (s *DescribeRegionsOutput) SetRegions(v []*Region) *DescribeRegionsOutput {
    +	s.Regions = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeReservedInstances.
    +type DescribeReservedInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * availability-zone - The Availability Zone where the Reserved Instance
    +	//    can be used.
    +	//
    +	//    * duration - The duration of the Reserved Instance (one year or three
    +	//    years), in seconds (31536000 | 94608000).
    +	//
    +	//    * end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).
    +	//
    +	//    * fixed-price - The purchase price of the Reserved Instance (for example,
    +	//    9800.0).
    +	//
    +	//    * instance-type - The instance type that is covered by the reservation.
    +	//
    +	//    * scope - The scope of the Reserved Instance (Region or Availability Zone).
    +	//
    +	//    * product-description - The Reserved Instance product platform description.
    +	//    Instances that include (Amazon VPC) in the product platform description
    +	//    will only be displayed to EC2-Classic account holders and are for use
    +	//    with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE
    +	//    Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux
    +	//    (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server
    +	//    Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with
    +	//    SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with
    +	//    SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)).
    +	//
    +	//    * reserved-instances-id - The ID of the Reserved Instance.
    +	//
    +	//    * start - The time at which the Reserved Instance purchase request was
    +	//    placed (for example, 2014-08-07T11:54:42.000Z).
    +	//
    +	//    * state - The state of the Reserved Instance (payment-pending | active
    +	//    | payment-failed | retired).
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * usage-price - The usage price of the Reserved Instance, per hour (for
    +	//    example, 0.84).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// Describes whether the Reserved Instance is Standard or Convertible.
    +	OfferingClass *string `type:"string" enum:"OfferingClassType"`
    +
    +	// The Reserved Instance offering type. If you are using tools that predate
    +	// the 2011-11-01 API version, you only have access to the Medium Utilization
    +	// Reserved Instance offering type.
    +	OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"`
    +
    +	// One or more Reserved Instance IDs.
    +	//
    +	// Default: Describes all your Reserved Instances, or only those otherwise specified.
    +	ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns the receiver for chaining.
    +func (s *DescribeReservedInstancesInput) SetDryRun(v bool) *DescribeReservedInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeReservedInstancesInput) SetFilters(v []*Filter) *DescribeReservedInstancesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetOfferingClass sets the OfferingClass field's value.
    +func (s *DescribeReservedInstancesInput) SetOfferingClass(v string) *DescribeReservedInstancesInput {
    +	s.OfferingClass = &v
    +	return s
    +}
    +
    +// SetOfferingType sets the OfferingType field's value.
    +func (s *DescribeReservedInstancesInput) SetOfferingType(v string) *DescribeReservedInstancesInput {
    +	s.OfferingType = &v
    +	return s
    +}
    +
    +// SetReservedInstancesIds sets the ReservedInstancesIds field's value.
    +func (s *DescribeReservedInstancesInput) SetReservedInstancesIds(v []*string) *DescribeReservedInstancesInput {
    +	s.ReservedInstancesIds = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeReservedInstancesListings.
    +type DescribeReservedInstancesListingsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more filters.
    +	//
    +	//    * reserved-instances-id - The ID of the Reserved Instances.
    +	//
    +	//    * reserved-instances-listing-id - The ID of the Reserved Instances listing.
    +	//
    +	//    * status - The status of the Reserved Instance listing (pending | active
    +	//    | cancelled | closed).
    +	//
    +	//    * status-message - The reason for the status.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The ID of the Reserved Instances.
    +	ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
    +
    +	// The ID of the Reserved Instance listing.
    +	ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesListingsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesListingsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeReservedInstancesListingsInput) SetFilters(v []*Filter) *DescribeReservedInstancesListingsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetReservedInstancesId sets the ReservedInstancesId field's value.
    +func (s *DescribeReservedInstancesListingsInput) SetReservedInstancesId(v string) *DescribeReservedInstancesListingsInput {
    +	s.ReservedInstancesId = &v
    +	return s
    +}
    +
    +// SetReservedInstancesListingId sets the ReservedInstancesListingId field's value.
    +func (s *DescribeReservedInstancesListingsInput) SetReservedInstancesListingId(v string) *DescribeReservedInstancesListingsInput {
    +	s.ReservedInstancesListingId = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeReservedInstancesListings.
    +type DescribeReservedInstancesListingsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the Reserved Instance listing.
    +	ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesListingsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesListingsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservedInstancesListings sets the ReservedInstancesListings field's value and returns the receiver for chaining.
    +func (s *DescribeReservedInstancesListingsOutput) SetReservedInstancesListings(v []*ReservedInstancesListing) *DescribeReservedInstancesListingsOutput {
    +	s.ReservedInstancesListings = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeReservedInstancesModifications.
    +type DescribeReservedInstancesModificationsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more filters.
    +	//
    +	//    * client-token - The idempotency token for the modification request.
    +	//
    +	//    * create-date - The time when the modification request was created.
    +	//
    +	//    * effective-date - The time when the modification becomes effective.
    +	//
    +	//    * modification-result.reserved-instances-id - The ID for the Reserved
    +	//    Instances created as part of the modification request. This ID is only
    +	//    available when the status of the modification is fulfilled.
    +	//
    +	//    * modification-result.target-configuration.availability-zone - The Availability
    +	//    Zone for the new Reserved Instances.
    +	//
    +	//    * modification-result.target-configuration.instance-count  - The number
    +	//    of new Reserved Instances.
    +	//
    +	//    * modification-result.target-configuration.instance-type - The instance
    +	//    type of the new Reserved Instances.
    +	//
    +	//    * modification-result.target-configuration.platform - The network platform
    +	//    of the new Reserved Instances (EC2-Classic | EC2-VPC).
    +	//
    +	//    * reserved-instances-id - The ID of the Reserved Instances modified.
    +	//
    +	//    * reserved-instances-modification-id - The ID of the modification request.
    +	//
    +	//    * status - The status of the Reserved Instances modification request (processing
    +	//    | fulfilled | failed).
    +	//
    +	//    * status-message - The reason for the status.
    +	//
    +	//    * update-date - The time when the modification request was last updated.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// IDs for the submitted modification request.
    +	ReservedInstancesModificationIds []*string `locationName:"ReservedInstancesModificationId" locationNameList:"ReservedInstancesModificationId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesModificationsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesModificationsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFilters sets the Filters field's value and returns the receiver for chaining.
    +func (s *DescribeReservedInstancesModificationsInput) SetFilters(v []*Filter) *DescribeReservedInstancesModificationsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeReservedInstancesModificationsInput) SetNextToken(v string) *DescribeReservedInstancesModificationsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetReservedInstancesModificationIds sets the ReservedInstancesModificationIds field's value.
    +func (s *DescribeReservedInstancesModificationsInput) SetReservedInstancesModificationIds(v []*string) *DescribeReservedInstancesModificationsInput {
    +	s.ReservedInstancesModificationIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeReservedInstancesModifications.
    +type DescribeReservedInstancesModificationsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The Reserved Instance modification information.
    +	ReservedInstancesModifications []*ReservedInstancesModification `locationName:"reservedInstancesModificationsSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesModificationsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesModificationsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value and returns the receiver for chaining.
    +func (s *DescribeReservedInstancesModificationsOutput) SetNextToken(v string) *DescribeReservedInstancesModificationsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetReservedInstancesModifications sets the ReservedInstancesModifications field's value.
    +func (s *DescribeReservedInstancesModificationsOutput) SetReservedInstancesModifications(v []*ReservedInstancesModification) *DescribeReservedInstancesModificationsOutput {
    +	s.ReservedInstancesModifications = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeReservedInstancesOfferings.
    +type DescribeReservedInstancesOfferingsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone in which the Reserved Instance can be used.
    +	AvailabilityZone *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * availability-zone - The Availability Zone where the Reserved Instance
    +	//    can be used.
    +	//
    +	//    * duration - The duration of the Reserved Instance (for example, one year
    +	//    or three years), in seconds (31536000 | 94608000).
    +	//
    +	//    * fixed-price - The purchase price of the Reserved Instance (for example,
    +	//    9800.0).
    +	//
    +	//    * instance-type - The instance type that is covered by the reservation.
    +	//
    +	//    * marketplace - Set to true to show only Reserved Instance Marketplace
    +	//    offerings. When this filter is not used, which is the default behavior,
    +	//    all offerings from both AWS and the Reserved Instance Marketplace are
    +	//    listed.
    +	//
    +	//    * product-description - The Reserved Instance product platform description.
    +	//    Instances that include (Amazon VPC) in the product platform description
    +	//    will only be displayed to EC2-Classic account holders and are for use
    +	//    with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux |
    +	//    SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise
    +	//    Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL
    +	//    Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows
    +	//    with SQL Server Web |  Windows with SQL Server Web (Amazon VPC) | Windows
    +	//    with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon
    +	//    VPC))
    +	//
    +	//    * reserved-instances-offering-id - The Reserved Instances offering ID.
    +	//
    +	//    * scope - The scope of the Reserved Instance (Availability Zone or Region).
    +	//
    +	//    * usage-price - The usage price of the Reserved Instance, per hour (for
    +	//    example, 0.84).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// Include Reserved Instance Marketplace offerings in the response.
    +	IncludeMarketplace *bool `type:"boolean"`
    +
    +	// The tenancy of the instances covered by the reservation. A Reserved Instance
    +	// with a tenancy of dedicated is applied to instances that run in a VPC on
    +	// single-tenant hardware (i.e., Dedicated Instances).
    +	//
    +	// Default: default
    +	InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"`
    +
    +	// The instance type that the reservation will cover (for example, m1.small).
    +	// For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	InstanceType *string `type:"string" enum:"InstanceType"`
    +
    +	// The maximum duration (in seconds) to filter when searching for offerings.
    +	//
    +	// Default: 94608000 (3 years)
    +	MaxDuration *int64 `type:"long"`
    +
    +	// The maximum number of instances to filter when searching for offerings.
    +	//
    +	// Default: 20
    +	MaxInstanceCount *int64 `type:"integer"`
    +
    +	// The maximum number of results to return for the request in a single page.
    +	// The remaining results of the initial request can be seen by sending another
    +	// request with the returned NextToken value. The maximum is 100.
    +	//
    +	// Default: 100
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The minimum duration (in seconds) to filter when searching for offerings.
    +	//
    +	// Default: 2592000 (1 month)
    +	MinDuration *int64 `type:"long"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The offering class of the Reserved Instance. Can be standard or convertible.
    +	OfferingClass *string `type:"string" enum:"OfferingClassType"`
    +
    +	// The Reserved Instance offering type. If you are using tools that predate
    +	// the 2011-11-01 API version, you only have access to the Medium Utilization
    +	// Reserved Instance offering type.
    +	OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"`
    +
    +	// The Reserved Instance product platform description. Instances that include
    +	// (Amazon VPC) in the description are for use with Amazon VPC.
    +	ProductDescription *string `type:"string" enum:"RIProductDescription"`
    +
    +	// One or more Reserved Instances offering IDs.
    +	ReservedInstancesOfferingIds []*string `locationName:"ReservedInstancesOfferingId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesOfferingsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesOfferingsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value and returns the receiver for chaining.
    +func (s *DescribeReservedInstancesOfferingsInput) SetAvailabilityZone(v string) *DescribeReservedInstancesOfferingsInput {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetDryRun(v bool) *DescribeReservedInstancesOfferingsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetFilters(v []*Filter) *DescribeReservedInstancesOfferingsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetIncludeMarketplace sets the IncludeMarketplace field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetIncludeMarketplace(v bool) *DescribeReservedInstancesOfferingsInput {
    +	s.IncludeMarketplace = &v
    +	return s
    +}
    +
    +// SetInstanceTenancy sets the InstanceTenancy field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetInstanceTenancy(v string) *DescribeReservedInstancesOfferingsInput {
    +	s.InstanceTenancy = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetInstanceType(v string) *DescribeReservedInstancesOfferingsInput {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetMaxDuration sets the MaxDuration field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetMaxDuration(v int64) *DescribeReservedInstancesOfferingsInput {
    +	s.MaxDuration = &v
    +	return s
    +}
    +
    +// SetMaxInstanceCount sets the MaxInstanceCount field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetMaxInstanceCount(v int64) *DescribeReservedInstancesOfferingsInput {
    +	s.MaxInstanceCount = &v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetMaxResults(v int64) *DescribeReservedInstancesOfferingsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetMinDuration sets the MinDuration field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetMinDuration(v int64) *DescribeReservedInstancesOfferingsInput {
    +	s.MinDuration = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetNextToken(v string) *DescribeReservedInstancesOfferingsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetOfferingClass sets the OfferingClass field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetOfferingClass(v string) *DescribeReservedInstancesOfferingsInput {
    +	s.OfferingClass = &v
    +	return s
    +}
    +
    +// SetOfferingType sets the OfferingType field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetOfferingType(v string) *DescribeReservedInstancesOfferingsInput {
    +	s.OfferingType = &v
    +	return s
    +}
    +
    +// SetProductDescription sets the ProductDescription field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetProductDescription(v string) *DescribeReservedInstancesOfferingsInput {
    +	s.ProductDescription = &v
    +	return s
    +}
    +
    +// SetReservedInstancesOfferingIds sets the ReservedInstancesOfferingIds field's value.
    +func (s *DescribeReservedInstancesOfferingsInput) SetReservedInstancesOfferingIds(v []*string) *DescribeReservedInstancesOfferingsInput {
    +	s.ReservedInstancesOfferingIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeReservedInstancesOfferings.
    +type DescribeReservedInstancesOfferingsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// A list of Reserved Instances offerings.
    +	ReservedInstancesOfferings []*ReservedInstancesOffering `locationName:"reservedInstancesOfferingsSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesOfferingsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesOfferingsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value and returns the receiver for chaining.
    +func (s *DescribeReservedInstancesOfferingsOutput) SetNextToken(v string) *DescribeReservedInstancesOfferingsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetReservedInstancesOfferings sets the ReservedInstancesOfferings field's value.
    +func (s *DescribeReservedInstancesOfferingsOutput) SetReservedInstancesOfferings(v []*ReservedInstancesOffering) *DescribeReservedInstancesOfferingsOutput {
    +	s.ReservedInstancesOfferings = v
    +	return s
    +}
    +
    +// Contains the output for DescribeReservedInstances.
    +type DescribeReservedInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A list of Reserved Instances.
    +	ReservedInstances []*ReservedInstances `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeReservedInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeReservedInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservedInstances sets the ReservedInstances field's value and returns the receiver for chaining.
    +func (s *DescribeReservedInstancesOutput) SetReservedInstances(v []*ReservedInstances) *DescribeReservedInstancesOutput {
    +	s.ReservedInstances = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeRouteTables.
    +type DescribeRouteTablesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * association.route-table-association-id - The ID of an association ID
    +	//    for the route table.
    +	//
    +	//    * association.route-table-id - The ID of the route table involved in the
    +	//    association.
    +	//
    +	//    * association.subnet-id - The ID of the subnet involved in the association.
    +	//
    +	//    * association.main - Indicates whether the route table is the main route
    +	//    table for the VPC (true | false).
    +	//
    +	//    * route-table-id - The ID of the route table.
    +	//
    +	//    * route.destination-cidr-block - The CIDR range specified in a route in
    +	//    the table.
    +	//
    +	//    * route.destination-prefix-list-id - The ID (prefix) of the AWS service
    +	//    specified in a route in the table.
    +	//
    +	//    * route.gateway-id - The ID of a gateway specified in a route in the table.
    +	//
    +	//    * route.instance-id - The ID of an instance specified in a route in the
    +	//    table.
    +	//
    +	//    * route.nat-gateway-id - The ID of a NAT gateway.
    +	//
    +	//    * route.origin - Describes how the route was created. CreateRouteTable
    +	//    indicates that the route was automatically created when the route table
    +	//    was created; CreateRoute indicates that the route was manually added to
    +	//    the route table; EnableVgwRoutePropagation indicates that the route was
    +	//    propagated by route propagation.
    +	//
    +	//    * route.state - The state of a route in the route table (active | blackhole).
    +	//    The blackhole state indicates that the route's target isn't available
    +	//    (for example, the specified gateway isn't attached to the VPC, the specified
    +	//    NAT instance has been terminated, and so on).
    +	//
    +	//    * route.vpc-peering-connection-id - The ID of a VPC peering connection
    +	//    specified in a route in the table.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * vpc-id - The ID of the VPC for the route table.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more route table IDs.
    +	//
    +	// Default: Describes all your route tables.
    +	RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeRouteTablesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeRouteTablesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value and returns the receiver for chaining.
    +func (s *DescribeRouteTablesInput) SetDryRun(v bool) *DescribeRouteTablesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeRouteTablesInput) SetFilters(v []*Filter) *DescribeRouteTablesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetRouteTableIds sets the RouteTableIds field's value.
    +func (s *DescribeRouteTablesInput) SetRouteTableIds(v []*string) *DescribeRouteTablesInput {
    +	s.RouteTableIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeRouteTables.
    +type DescribeRouteTablesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more route tables.
    +	RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeRouteTablesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeRouteTablesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetRouteTables sets the RouteTables field's value.
    +func (s *DescribeRouteTablesOutput) SetRouteTables(v []*RouteTable) *DescribeRouteTablesOutput {
    +	s.RouteTables = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeScheduledInstanceAvailability.
    +type DescribeScheduledInstanceAvailabilityInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * availability-zone - The Availability Zone (for example, us-west-2a).
    +	//
    +	//    * instance-type - The instance type (for example, c4.large).
    +	//
    +	//    * network-platform - The network platform (EC2-Classic or EC2-VPC).
    +	//
    +	//    * platform - The platform (Linux/UNIX or Windows).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The time period for the first schedule to start.
    +	//
    +	// FirstSlotStartTimeRange is a required field
    +	FirstSlotStartTimeRange *SlotDateTimeRangeRequest `type:"structure" required:"true"`
    +
    +	// The maximum number of results to return in a single call. This value can
    +	// be between 5 and 300. The default value is 300. To retrieve the remaining
    +	// results, make another call with the returned NextToken value.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The maximum available duration, in hours. This value must be greater than
    +	// MinSlotDurationInHours and less than 1,720.
    +	MaxSlotDurationInHours *int64 `type:"integer"`
    +
    +	// The minimum available duration, in hours. The minimum required duration is
    +	// 1,200 hours per year. For example, the minimum daily schedule is 4 hours,
    +	// the minimum weekly schedule is 24 hours, and the minimum monthly schedule
    +	// is 100 hours.
    +	MinSlotDurationInHours *int64 `type:"integer"`
    +
    +	// The token for the next set of results.
    +	NextToken *string `type:"string"`
    +
    +	// The schedule recurrence.
    +	//
    +	// Recurrence is a required field
    +	Recurrence *ScheduledInstanceRecurrenceRequest `type:"structure" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeScheduledInstanceAvailabilityInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeScheduledInstanceAvailabilityInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeScheduledInstanceAvailabilityInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeScheduledInstanceAvailabilityInput"}
    +	if s.FirstSlotStartTimeRange == nil {
    +		invalidParams.Add(request.NewErrParamRequired("FirstSlotStartTimeRange"))
    +	}
    +	if s.Recurrence == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Recurrence"))
    +	}
    +	if s.FirstSlotStartTimeRange != nil {
    +		if err := s.FirstSlotStartTimeRange.Validate(); err != nil {
    +			invalidParams.AddNested("FirstSlotStartTimeRange", err.(request.ErrInvalidParams))
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetDryRun(v bool) *DescribeScheduledInstanceAvailabilityInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetFilters(v []*Filter) *DescribeScheduledInstanceAvailabilityInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetFirstSlotStartTimeRange sets the FirstSlotStartTimeRange field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetFirstSlotStartTimeRange(v *SlotDateTimeRangeRequest) *DescribeScheduledInstanceAvailabilityInput {
    +	s.FirstSlotStartTimeRange = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetMaxResults(v int64) *DescribeScheduledInstanceAvailabilityInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetMaxSlotDurationInHours sets the MaxSlotDurationInHours field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetMaxSlotDurationInHours(v int64) *DescribeScheduledInstanceAvailabilityInput {
    +	s.MaxSlotDurationInHours = &v
    +	return s
    +}
    +
    +// SetMinSlotDurationInHours sets the MinSlotDurationInHours field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetMinSlotDurationInHours(v int64) *DescribeScheduledInstanceAvailabilityInput {
    +	s.MinSlotDurationInHours = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetNextToken(v string) *DescribeScheduledInstanceAvailabilityInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetRecurrence sets the Recurrence field's value.
    +func (s *DescribeScheduledInstanceAvailabilityInput) SetRecurrence(v *ScheduledInstanceRecurrenceRequest) *DescribeScheduledInstanceAvailabilityInput {
    +	s.Recurrence = v
    +	return s
    +}
    +
    +// Contains the output of DescribeScheduledInstanceAvailability.
    +type DescribeScheduledInstanceAvailabilityOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token required to retrieve the next set of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Information about the available Scheduled Instances.
    +	ScheduledInstanceAvailabilitySet []*ScheduledInstanceAvailability `locationName:"scheduledInstanceAvailabilitySet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeScheduledInstanceAvailabilityOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeScheduledInstanceAvailabilityOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeScheduledInstanceAvailabilityOutput) SetNextToken(v string) *DescribeScheduledInstanceAvailabilityOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetScheduledInstanceAvailabilitySet sets the ScheduledInstanceAvailabilitySet field's value.
    +func (s *DescribeScheduledInstanceAvailabilityOutput) SetScheduledInstanceAvailabilitySet(v []*ScheduledInstanceAvailability) *DescribeScheduledInstanceAvailabilityOutput {
    +	s.ScheduledInstanceAvailabilitySet = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeScheduledInstances.
    +type DescribeScheduledInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * availability-zone - The Availability Zone (for example, us-west-2a).
    +	//
    +	//    * instance-type - The instance type (for example, c4.large).
    +	//
    +	//    * network-platform - The network platform (EC2-Classic or EC2-VPC).
    +	//
    +	//    * platform - The platform (Linux/UNIX or Windows).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of results to return in a single call. This value can
    +	// be between 5 and 300. The default value is 100. To retrieve the remaining
    +	// results, make another call with the returned NextToken value.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The token for the next set of results.
    +	NextToken *string `type:"string"`
    +
    +	// One or more Scheduled Instance IDs.
    +	ScheduledInstanceIds []*string `locationName:"ScheduledInstanceId" locationNameList:"ScheduledInstanceId" type:"list"`
    +
    +	// The time period for the first schedule to start.
    +	SlotStartTimeRange *SlotStartTimeRangeRequest `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeScheduledInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeScheduledInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeScheduledInstancesInput) SetDryRun(v bool) *DescribeScheduledInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeScheduledInstancesInput) SetFilters(v []*Filter) *DescribeScheduledInstancesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeScheduledInstancesInput) SetMaxResults(v int64) *DescribeScheduledInstancesInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeScheduledInstancesInput) SetNextToken(v string) *DescribeScheduledInstancesInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetScheduledInstanceIds sets the ScheduledInstanceIds field's value.
    +func (s *DescribeScheduledInstancesInput) SetScheduledInstanceIds(v []*string) *DescribeScheduledInstancesInput {
    +	s.ScheduledInstanceIds = v
    +	return s
    +}
    +
    +// SetSlotStartTimeRange sets the SlotStartTimeRange field's value.
    +func (s *DescribeScheduledInstancesInput) SetSlotStartTimeRange(v *SlotStartTimeRangeRequest) *DescribeScheduledInstancesInput {
    +	s.SlotStartTimeRange = v
    +	return s
    +}
    +
    +// Contains the output of DescribeScheduledInstances.
    +type DescribeScheduledInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token required to retrieve the next set of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Information about the Scheduled Instances.
    +	ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeScheduledInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeScheduledInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeScheduledInstancesOutput) SetNextToken(v string) *DescribeScheduledInstancesOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetScheduledInstanceSet sets the ScheduledInstanceSet field's value.
    +func (s *DescribeScheduledInstancesOutput) SetScheduledInstanceSet(v []*ScheduledInstance) *DescribeScheduledInstancesOutput {
    +	s.ScheduledInstanceSet = v
    +	return s
    +}
    +
    +type DescribeSecurityGroupReferencesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the operation, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// One or more security group IDs in your account.
    +	//
    +	// GroupId is a required field
    +	GroupId []*string `locationNameList:"item" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSecurityGroupReferencesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSecurityGroupReferencesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeSecurityGroupReferencesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityGroupReferencesInput"}
    +	if s.GroupId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("GroupId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSecurityGroupReferencesInput) SetDryRun(v bool) *DescribeSecurityGroupReferencesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *DescribeSecurityGroupReferencesInput) SetGroupId(v []*string) *DescribeSecurityGroupReferencesInput {
    +	s.GroupId = v
    +	return s
    +}
    +
    +type DescribeSecurityGroupReferencesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the VPCs with the referencing security groups.
    +	SecurityGroupReferenceSet []*SecurityGroupReference `locationName:"securityGroupReferenceSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSecurityGroupReferencesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSecurityGroupReferencesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSecurityGroupReferenceSet sets the SecurityGroupReferenceSet field's value.
    +func (s *DescribeSecurityGroupReferencesOutput) SetSecurityGroupReferenceSet(v []*SecurityGroupReference) *DescribeSecurityGroupReferencesOutput {
    +	s.SecurityGroupReferenceSet = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSecurityGroups.
    +type DescribeSecurityGroupsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters. If using multiple filters for rules, the results include
    +	// security groups for which any combination of rules - not necessarily a single
    +	// rule - match all filters.
    +	//
    +	//    * description - The description of the security group.
    +	//
    +	//    * egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service
    +	//    to which the security group allows access.
    +	//
    +	//    * group-id - The ID of the security group.
    +	//
    +	//    * group-name - The name of the security group.
    +	//
    +	//    * ip-permission.cidr - A CIDR range that has been granted permission.
    +	//
    +	//    * ip-permission.from-port - The start of port range for the TCP and UDP
    +	//    protocols, or an ICMP type number.
    +	//
    +	//    * ip-permission.group-id - The ID of a security group that has been granted
    +	//    permission.
    +	//
    +	//    * ip-permission.group-name - The name of a security group that has been
    +	//    granted permission.
    +	//
    +	//    * ip-permission.protocol - The IP protocol for the permission (tcp | udp
    +	//    | icmp or a protocol number).
    +	//
    +	//    * ip-permission.to-port - The end of port range for the TCP and UDP protocols,
    +	//    or an ICMP code.
    +	//
    +	//    * ip-permission.user-id - The ID of an AWS account that has been granted
    +	//    permission.
    +	//
    +	//    * owner-id - The AWS account ID of the owner of the security group.
    +	//
    +	//    * tag-key - The key of a tag assigned to the security group.
    +	//
    +	//    * tag-value - The value of a tag assigned to the security group.
    +	//
    +	//    * vpc-id - The ID of the VPC specified when the security group was created.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more security group IDs. Required for security groups in a nondefault
    +	// VPC.
    +	//
    +	// Default: Describes all your security groups.
    +	GroupIds []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"`
    +
    +	// [EC2-Classic and default VPC only] One or more security group names. You
    +	// can specify either the security group name or the security group ID. For
    +	// security groups in a nondefault VPC, use the group-name filter to describe
    +	// security groups by name.
    +	//
    +	// Default: Describes all your security groups.
    +	GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSecurityGroupsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSecurityGroupsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSecurityGroupsInput) SetDryRun(v bool) *DescribeSecurityGroupsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeSecurityGroupsInput) SetFilters(v []*Filter) *DescribeSecurityGroupsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetGroupIds sets the GroupIds field's value.
    +func (s *DescribeSecurityGroupsInput) SetGroupIds(v []*string) *DescribeSecurityGroupsInput {
    +	s.GroupIds = v
    +	return s
    +}
    +
    +// SetGroupNames sets the GroupNames field's value.
    +func (s *DescribeSecurityGroupsInput) SetGroupNames(v []*string) *DescribeSecurityGroupsInput {
    +	s.GroupNames = v
    +	return s
    +}
    +
    +// Contains the output of DescribeSecurityGroups.
    +type DescribeSecurityGroupsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more security groups.
    +	SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSecurityGroupsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSecurityGroupsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSecurityGroups sets the SecurityGroups field's value.
    +func (s *DescribeSecurityGroupsOutput) SetSecurityGroups(v []*SecurityGroup) *DescribeSecurityGroupsOutput {
    +	s.SecurityGroups = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSnapshotAttribute.
    +type DescribeSnapshotAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The snapshot attribute you would like to view.
    +	//
    +	// Attribute is a required field
    +	Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the EBS snapshot.
    +	//
    +	// SnapshotId is a required field
    +	SnapshotId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSnapshotAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSnapshotAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeSnapshotAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeSnapshotAttributeInput"}
    +	if s.Attribute == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Attribute"))
    +	}
    +	if s.SnapshotId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SnapshotId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *DescribeSnapshotAttributeInput) SetAttribute(v string) *DescribeSnapshotAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSnapshotAttributeInput) SetDryRun(v bool) *DescribeSnapshotAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *DescribeSnapshotAttributeInput) SetSnapshotId(v string) *DescribeSnapshotAttributeInput {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeSnapshotAttribute.
    +type DescribeSnapshotAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A list of permissions for creating volumes from the snapshot.
    +	CreateVolumePermissions []*CreateVolumePermission `locationName:"createVolumePermission" locationNameList:"item" type:"list"`
    +
    +	// A list of product codes.
    +	ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
    +
    +	// The ID of the EBS snapshot.
    +	SnapshotId *string `locationName:"snapshotId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSnapshotAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSnapshotAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCreateVolumePermissions sets the CreateVolumePermissions field's value.
    +func (s *DescribeSnapshotAttributeOutput) SetCreateVolumePermissions(v []*CreateVolumePermission) *DescribeSnapshotAttributeOutput {
    +	s.CreateVolumePermissions = v
    +	return s
    +}
    +
    +// SetProductCodes sets the ProductCodes field's value.
    +func (s *DescribeSnapshotAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeSnapshotAttributeOutput {
    +	s.ProductCodes = v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *DescribeSnapshotAttributeOutput) SetSnapshotId(v string) *DescribeSnapshotAttributeOutput {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSnapshots.
    +type DescribeSnapshotsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * description - A description of the snapshot.
    +	//
    +	//    * owner-alias - Value from an Amazon-maintained list (amazon | aws-marketplace
    +	//    | microsoft) of snapshot owners. Not to be confused with the user-configured
    +	//    AWS account alias, which is set from the IAM console.
    +	//
    +	//    * owner-id - The ID of the AWS account that owns the snapshot.
    +	//
    +	//    * progress - The progress of the snapshot, as a percentage (for example,
    +	//    80%).
    +	//
    +	//    * snapshot-id - The snapshot ID.
    +	//
    +	//    * start-time - The time stamp when the snapshot was initiated.
    +	//
    +	//    * status - The status of the snapshot (pending | completed | error).
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * volume-id - The ID of the volume the snapshot is for.
    +	//
    +	//    * volume-size - The size of the volume, in GiB.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of snapshot results returned by DescribeSnapshots in paginated
    +	// output. When this parameter is used, DescribeSnapshots only returns MaxResults
    +	// results in a single page along with a NextToken response element. The remaining
    +	// results of the initial request can be seen by sending another DescribeSnapshots
    +	// request with the returned NextToken value. This value can be between 5 and
    +	// 1000; if MaxResults is given a value larger than 1000, only 1000 results
    +	// are returned. If this parameter is not used, then DescribeSnapshots returns
    +	// all results. You cannot specify this parameter and the snapshot IDs parameter
    +	// in the same request.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The NextToken value returned from a previous paginated DescribeSnapshots
    +	// request where MaxResults was used and the results exceeded the value of that
    +	// parameter. Pagination continues from the end of the previous results that
    +	// returned the NextToken value. This value is null when there are no more results
    +	// to return.
    +	NextToken *string `type:"string"`
    +
    +	// Returns the snapshots owned by the specified owner. Multiple owners can be
    +	// specified.
    +	OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"`
    +
    +	// One or more AWS account IDs that can create volumes from the snapshot.
    +	RestorableByUserIds []*string `locationName:"RestorableBy" type:"list"`
    +
    +	// One or more snapshot IDs.
    +	//
    +	// Default: Describes snapshots for which you have launch permissions.
    +	SnapshotIds []*string `locationName:"SnapshotId" locationNameList:"SnapshotId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSnapshotsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSnapshotsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSnapshotsInput) SetDryRun(v bool) *DescribeSnapshotsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeSnapshotsInput) SetFilters(v []*Filter) *DescribeSnapshotsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeSnapshotsInput) SetMaxResults(v int64) *DescribeSnapshotsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSnapshotsInput) SetNextToken(v string) *DescribeSnapshotsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetOwnerIds sets the OwnerIds field's value.
    +func (s *DescribeSnapshotsInput) SetOwnerIds(v []*string) *DescribeSnapshotsInput {
    +	s.OwnerIds = v
    +	return s
    +}
    +
    +// SetRestorableByUserIds sets the RestorableByUserIds field's value.
    +func (s *DescribeSnapshotsInput) SetRestorableByUserIds(v []*string) *DescribeSnapshotsInput {
    +	s.RestorableByUserIds = v
    +	return s
    +}
    +
    +// SetSnapshotIds sets the SnapshotIds field's value.
    +func (s *DescribeSnapshotsInput) SetSnapshotIds(v []*string) *DescribeSnapshotsInput {
    +	s.SnapshotIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeSnapshots.
    +type DescribeSnapshotsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The NextToken value to include in a future DescribeSnapshots request. When
    +	// the results of a DescribeSnapshots request exceed MaxResults, this value
    +	// can be used to retrieve the next page of results. This value is null when
    +	// there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Information about the snapshots.
    +	Snapshots []*Snapshot `locationName:"snapshotSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSnapshotsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSnapshotsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSnapshotsOutput) SetNextToken(v string) *DescribeSnapshotsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSnapshots sets the Snapshots field's value.
    +func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshotsOutput {
    +	s.Snapshots = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSpotDatafeedSubscription.
    +type DescribeSpotDatafeedSubscriptionInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotDatafeedSubscriptionInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotDatafeedSubscriptionInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSpotDatafeedSubscriptionInput) SetDryRun(v bool) *DescribeSpotDatafeedSubscriptionInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeSpotDatafeedSubscription.
    +type DescribeSpotDatafeedSubscriptionOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Spot instance data feed subscription.
    +	SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotDatafeedSubscriptionOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotDatafeedSubscriptionOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSpotDatafeedSubscription sets the SpotDatafeedSubscription field's value.
    +func (s *DescribeSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v *SpotDatafeedSubscription) *DescribeSpotDatafeedSubscriptionOutput {
    +	s.SpotDatafeedSubscription = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSpotFleetInstances.
    +type DescribeSpotFleetInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The maximum number of results to return in a single call. Specify a value
    +	// between 1 and 1000. The default value is 1000. To retrieve the remaining
    +	// results, make another call with the returned NextToken value.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token for the next set of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotFleetInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotFleetInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeSpotFleetInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetInstancesInput"}
    +	if s.SpotFleetRequestId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSpotFleetInstancesInput) SetDryRun(v bool) *DescribeSpotFleetInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeSpotFleetInstancesInput) SetMaxResults(v int64) *DescribeSpotFleetInstancesInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSpotFleetInstancesInput) SetNextToken(v string) *DescribeSpotFleetInstancesInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value.
    +func (s *DescribeSpotFleetInstancesInput) SetSpotFleetRequestId(v string) *DescribeSpotFleetInstancesInput {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeSpotFleetInstances.
    +type DescribeSpotFleetInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The running instances. Note that this list is refreshed periodically and
    +	// might be out of date.
    +	//
    +	// ActiveInstances is a required field
    +	ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list" required:"true"`
    +
    +	// The token required to retrieve the next set of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotFleetInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotFleetInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetActiveInstances sets the ActiveInstances field's value.
    +func (s *DescribeSpotFleetInstancesOutput) SetActiveInstances(v []*ActiveInstance) *DescribeSpotFleetInstancesOutput {
    +	s.ActiveInstances = v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSpotFleetInstancesOutput) SetNextToken(v string) *DescribeSpotFleetInstancesOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value.
    +func (s *DescribeSpotFleetInstancesOutput) SetSpotFleetRequestId(v string) *DescribeSpotFleetInstancesOutput {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSpotFleetRequestHistory.
    +type DescribeSpotFleetRequestHistoryInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The type of events to describe. By default, all events are described.
    +	EventType *string `locationName:"eventType" type:"string" enum:"EventType"`
    +
    +	// The maximum number of results to return in a single call. Specify a value
    +	// between 1 and 1000. The default value is 1000. To retrieve the remaining
    +	// results, make another call with the returned NextToken value.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token for the next set of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +
    +	// The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	//
    +	// StartTime is a required field
    +	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotFleetRequestHistoryInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotFleetRequestHistoryInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeSpotFleetRequestHistoryInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetRequestHistoryInput"}
    +	if s.SpotFleetRequestId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId"))
    +	}
    +	if s.StartTime == nil {
    +		invalidParams.Add(request.NewErrParamRequired("StartTime"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSpotFleetRequestHistoryInput) SetDryRun(v bool) *DescribeSpotFleetRequestHistoryInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEventType sets the EventType field's value.
    +func (s *DescribeSpotFleetRequestHistoryInput) SetEventType(v string) *DescribeSpotFleetRequestHistoryInput {
    +	s.EventType = &v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeSpotFleetRequestHistoryInput) SetMaxResults(v int64) *DescribeSpotFleetRequestHistoryInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSpotFleetRequestHistoryInput) SetNextToken(v string) *DescribeSpotFleetRequestHistoryInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value.
    +func (s *DescribeSpotFleetRequestHistoryInput) SetSpotFleetRequestId(v string) *DescribeSpotFleetRequestHistoryInput {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// SetStartTime sets the StartTime field's value.
    +func (s *DescribeSpotFleetRequestHistoryInput) SetStartTime(v time.Time) *DescribeSpotFleetRequestHistoryInput {
    +	s.StartTime = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeSpotFleetRequestHistory.
    +type DescribeSpotFleetRequestHistoryOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the events in the history of the Spot fleet request.
    +	//
    +	// HistoryRecords is a required field
    +	HistoryRecords []*HistoryRecord `locationName:"historyRecordSet" locationNameList:"item" type:"list" required:"true"`
    +
    +	// The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	// All records up to this time were retrieved.
    +	//
    +	// If nextToken indicates that there are more results, this value is not present.
    +	//
    +	// LastEvaluatedTime is a required field
    +	LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp" timestampFormat:"iso8601" required:"true"`
    +
    +	// The token required to retrieve the next set of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +
    +	// The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	//
    +	// StartTime is a required field
    +	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotFleetRequestHistoryOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotFleetRequestHistoryOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetHistoryRecords sets the HistoryRecords field's value.
    +func (s *DescribeSpotFleetRequestHistoryOutput) SetHistoryRecords(v []*HistoryRecord) *DescribeSpotFleetRequestHistoryOutput {
    +	s.HistoryRecords = v
    +	return s
    +}
    +
    +// SetLastEvaluatedTime sets the LastEvaluatedTime field's value.
    +func (s *DescribeSpotFleetRequestHistoryOutput) SetLastEvaluatedTime(v time.Time) *DescribeSpotFleetRequestHistoryOutput {
    +	s.LastEvaluatedTime = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSpotFleetRequestHistoryOutput) SetNextToken(v string) *DescribeSpotFleetRequestHistoryOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value.
    +func (s *DescribeSpotFleetRequestHistoryOutput) SetSpotFleetRequestId(v string) *DescribeSpotFleetRequestHistoryOutput {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// SetStartTime sets the StartTime field's value.
    +func (s *DescribeSpotFleetRequestHistoryOutput) SetStartTime(v time.Time) *DescribeSpotFleetRequestHistoryOutput {
    +	s.StartTime = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSpotFleetRequests.
    +type DescribeSpotFleetRequestsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The maximum number of results to return in a single call. Specify a value
    +	// between 1 and 1000. The default value is 1000. To retrieve the remaining
    +	// results, make another call with the returned NextToken value.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token for the next set of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The IDs of the Spot fleet requests.
    +	SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotFleetRequestsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotFleetRequestsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSpotFleetRequestsInput) SetDryRun(v bool) *DescribeSpotFleetRequestsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeSpotFleetRequestsInput) SetMaxResults(v int64) *DescribeSpotFleetRequestsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSpotFleetRequestsInput) SetNextToken(v string) *DescribeSpotFleetRequestsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestIds sets the SpotFleetRequestIds field's value.
    +func (s *DescribeSpotFleetRequestsInput) SetSpotFleetRequestIds(v []*string) *DescribeSpotFleetRequestsInput {
    +	s.SpotFleetRequestIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeSpotFleetRequests.
    +type DescribeSpotFleetRequestsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token required to retrieve the next set of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Information about the configuration of your Spot fleet.
    +	//
    +	// SpotFleetRequestConfigs is a required field
    +	SpotFleetRequestConfigs []*SpotFleetRequestConfig `locationName:"spotFleetRequestConfigSet" locationNameList:"item" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotFleetRequestsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotFleetRequestsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSpotFleetRequestsOutput) SetNextToken(v string) *DescribeSpotFleetRequestsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestConfigs sets the SpotFleetRequestConfigs field's value.
    +func (s *DescribeSpotFleetRequestsOutput) SetSpotFleetRequestConfigs(v []*SpotFleetRequestConfig) *DescribeSpotFleetRequestsOutput {
    +	s.SpotFleetRequestConfigs = v
    +	return s
    +}
    +
// Contains the parameters for DescribeSpotInstanceRequests.
//
// NOTE(review): shape matches aws-sdk-go's generated EC2 API types —
// presumably machine-generated; confirm before hand-editing anything other
// than comments. The struct tags drive wire (de)serialization.
type DescribeSpotInstanceRequestsInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// One or more filters.
	//
	//    * availability-zone-group - The Availability Zone group.
	//
	//    * create-time - The time stamp when the Spot instance request was created.
	//
	//    * fault-code - The fault code related to the request.
	//
	//    * fault-message - The fault message related to the request.
	//
	//    * instance-id - The ID of the instance that fulfilled the request.
	//
	//    * launch-group - The Spot instance launch group.
	//
	//    * launch.block-device-mapping.delete-on-termination - Indicates whether
	//    the Amazon EBS volume is deleted on instance termination.
	//
	//    * launch.block-device-mapping.device-name - The device name for the Amazon
	//    EBS volume (for example, /dev/sdh).
	//
	//    * launch.block-device-mapping.snapshot-id - The ID of the snapshot used
	//    for the Amazon EBS volume.
	//
	//    * launch.block-device-mapping.volume-size - The size of the Amazon EBS
	//    volume, in GiB.
	//
	//    * launch.block-device-mapping.volume-type - The type of the Amazon EBS
	//    volume: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1
	//    for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.
	//
	//    * launch.group-id - The security group for the instance.
	//
	//    * launch.image-id - The ID of the AMI.
	//
	//    * launch.instance-type - The type of instance (for example, m3.medium).
	//
	//    * launch.kernel-id - The kernel ID.
	//
	//    * launch.key-name - The name of the key pair the instance launched with.
	//
	//    * launch.monitoring-enabled - Whether monitoring is enabled for the Spot
	//    instance.
	//
	//    * launch.ramdisk-id - The RAM disk ID.
	//
	//    * network-interface.network-interface-id - The ID of the network interface.
	//
	//    * network-interface.device-index - The index of the device for the network
	//    interface attachment on the instance.
	//
	//    * network-interface.subnet-id - The ID of the subnet for the instance.
	//
	//    * network-interface.description - A description of the network interface.
	//
	//    * network-interface.private-ip-address - The primary private IP address
	//    of the network interface.
	//
	//    * network-interface.delete-on-termination - Indicates whether the network
	//    interface is deleted when the instance is terminated.
	//
	//    * network-interface.group-id - The ID of the security group associated
	//    with the network interface.
	//
	//    * network-interface.group-name - The name of the security group associated
	//    with the network interface.
	//
	//    * network-interface.addresses.primary - Indicates whether the IP address
	//    is the primary private IP address.
	//
	//    * product-description - The product description associated with the instance
	//    (Linux/UNIX | Windows).
	//
	//    * spot-instance-request-id - The Spot instance request ID.
	//
	//    * spot-price - The maximum hourly price for any Spot instance launched
	//    to fulfill the request.
	//
	//    * state - The state of the Spot instance request (open | active | closed
	//    | cancelled | failed). Spot bid status information can help you track
	//    your Amazon EC2 Spot instance requests. For more information, see Spot
	//    Bid Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html)
	//    in the Amazon Elastic Compute Cloud User Guide.
	//
	//    * status-code - The short code describing the most recent evaluation of
	//    your Spot instance request.
	//
	//    * status-message - The message explaining the status of the Spot instance
	//    request.
	//
	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
	//
	//    * tag-key - The key of a tag assigned to the resource. This filter is
	//    independent of the tag-value filter. For example, if you use both the
	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
	//    assigned both the tag key Purpose (regardless of what the tag's value
	//    is), and the tag value X (regardless of what the tag's key is). If you
	//    want to list only resources where Purpose is X, see the tag:key=value
	//    filter.
	//
	//    * tag-value - The value of a tag assigned to the resource. This filter
	//    is independent of the tag-key filter.
	//
	//    * type - The type of Spot instance request (one-time | persistent).
	//
	//    * launched-availability-zone - The Availability Zone in which the bid
	//    is launched.
	//
	//    * valid-from - The start date of the request.
	//
	//    * valid-until - The end date of the request.
	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

	// One or more Spot instance request IDs.
	SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list"`
}

// String returns the string representation.
func (s DescribeSpotInstanceRequestsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DescribeSpotInstanceRequestsInput) GoString() string {
	return s.String()
}

// SetDryRun sets the DryRun field's value.
func (s *DescribeSpotInstanceRequestsInput) SetDryRun(v bool) *DescribeSpotInstanceRequestsInput {
	s.DryRun = &v
	return s
}

// SetFilters sets the Filters field's value.
func (s *DescribeSpotInstanceRequestsInput) SetFilters(v []*Filter) *DescribeSpotInstanceRequestsInput {
	s.Filters = v
	return s
}

// SetSpotInstanceRequestIds sets the SpotInstanceRequestIds field's value.
func (s *DescribeSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*string) *DescribeSpotInstanceRequestsInput {
	s.SpotInstanceRequestIds = v
	return s
}
    +
    +// Contains the output of DescribeSpotInstanceRequests.
    +type DescribeSpotInstanceRequestsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more Spot instance requests.
    +	SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotInstanceRequestsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotInstanceRequestsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSpotInstanceRequests sets the SpotInstanceRequests field's value.
    +func (s *DescribeSpotInstanceRequestsOutput) SetSpotInstanceRequests(v []*SpotInstanceRequest) *DescribeSpotInstanceRequestsOutput {
    +	s.SpotInstanceRequests = v
    +	return s
    +}
    +
// Contains the parameters for DescribeSpotPriceHistory.
//
// NOTE(review): shape matches aws-sdk-go's generated EC2 API types —
// presumably machine-generated; confirm before hand-editing anything other
// than comments. The struct tags drive wire (de)serialization.
type DescribeSpotPriceHistoryInput struct {
	_ struct{} `type:"structure"`

	// Filters the results by the specified Availability Zone.
	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The date and time, up to the current date, from which to stop retrieving
	// the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
	EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"`

	// One or more filters.
	//
	//    * availability-zone - The Availability Zone for which prices should be
	//    returned.
	//
	//    * instance-type - The type of instance (for example, m3.medium).
	//
	//    * product-description - The product description for the Spot price (Linux/UNIX
	//    | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon
	//    VPC) | Windows (Amazon VPC)).
	//
	//    * spot-price - The Spot price. The value must match exactly (or use wildcards;
	//    greater than or less than comparison is not supported).
	//
	//    * timestamp - The timestamp of the Spot price history, in UTC format (for
	//    example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater
	//    than or less than comparison is not supported.
	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

	// Filters the results by the specified instance types.
	InstanceTypes []*string `locationName:"InstanceType" type:"list"`

	// The maximum number of results to return in a single call. Specify a value
	// between 1 and 1000. The default value is 1000. To retrieve the remaining
	// results, make another call with the returned NextToken value.
	MaxResults *int64 `locationName:"maxResults" type:"integer"`

	// The token for the next set of results.
	NextToken *string `locationName:"nextToken" type:"string"`

	// Filters the results by the specified basic product descriptions.
	ProductDescriptions []*string `locationName:"ProductDescription" type:"list"`

	// The date and time, up to the past 90 days, from which to start retrieving
	// the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"`
}

// String returns the string representation.
func (s DescribeSpotPriceHistoryInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DescribeSpotPriceHistoryInput) GoString() string {
	return s.String()
}

// SetAvailabilityZone sets the AvailabilityZone field's value.
func (s *DescribeSpotPriceHistoryInput) SetAvailabilityZone(v string) *DescribeSpotPriceHistoryInput {
	s.AvailabilityZone = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *DescribeSpotPriceHistoryInput) SetDryRun(v bool) *DescribeSpotPriceHistoryInput {
	s.DryRun = &v
	return s
}

// SetEndTime sets the EndTime field's value.
func (s *DescribeSpotPriceHistoryInput) SetEndTime(v time.Time) *DescribeSpotPriceHistoryInput {
	s.EndTime = &v
	return s
}

// SetFilters sets the Filters field's value.
func (s *DescribeSpotPriceHistoryInput) SetFilters(v []*Filter) *DescribeSpotPriceHistoryInput {
	s.Filters = v
	return s
}

// SetInstanceTypes sets the InstanceTypes field's value.
func (s *DescribeSpotPriceHistoryInput) SetInstanceTypes(v []*string) *DescribeSpotPriceHistoryInput {
	s.InstanceTypes = v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *DescribeSpotPriceHistoryInput) SetMaxResults(v int64) *DescribeSpotPriceHistoryInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeSpotPriceHistoryInput) SetNextToken(v string) *DescribeSpotPriceHistoryInput {
	s.NextToken = &v
	return s
}

// SetProductDescriptions sets the ProductDescriptions field's value.
func (s *DescribeSpotPriceHistoryInput) SetProductDescriptions(v []*string) *DescribeSpotPriceHistoryInput {
	s.ProductDescriptions = v
	return s
}

// SetStartTime sets the StartTime field's value.
func (s *DescribeSpotPriceHistoryInput) SetStartTime(v time.Time) *DescribeSpotPriceHistoryInput {
	s.StartTime = &v
	return s
}
    +
    +// Contains the output of DescribeSpotPriceHistory.
    +type DescribeSpotPriceHistoryOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token required to retrieve the next set of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// The historical Spot prices.
    +	SpotPriceHistory []*SpotPrice `locationName:"spotPriceHistorySet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSpotPriceHistoryOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSpotPriceHistoryOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeSpotPriceHistoryOutput) SetNextToken(v string) *DescribeSpotPriceHistoryOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetSpotPriceHistory sets the SpotPriceHistory field's value.
    +func (s *DescribeSpotPriceHistoryOutput) SetSpotPriceHistory(v []*SpotPrice) *DescribeSpotPriceHistoryOutput {
    +	s.SpotPriceHistory = v
    +	return s
    +}
    +
    +type DescribeStaleSecurityGroupsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the operation, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// The maximum number of items to return for this request. The request returns
    +	// a token that you can specify in a subsequent call to get the next set of
    +	// results.
    +	MaxResults *int64 `min:"5" type:"integer"`
    +
    +	// The token for the next set of items to return. (You received this token from
    +	// a prior call.)
    +	NextToken *string `min:"1" type:"string"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeStaleSecurityGroupsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeStaleSecurityGroupsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeStaleSecurityGroupsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeStaleSecurityGroupsInput"}
    +	if s.MaxResults != nil && *s.MaxResults < 5 {
    +		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
    +	}
    +	if s.NextToken != nil && len(*s.NextToken) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
    +	}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeStaleSecurityGroupsInput) SetDryRun(v bool) *DescribeStaleSecurityGroupsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeStaleSecurityGroupsInput) SetMaxResults(v int64) *DescribeStaleSecurityGroupsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeStaleSecurityGroupsInput) SetNextToken(v string) *DescribeStaleSecurityGroupsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DescribeStaleSecurityGroupsInput) SetVpcId(v string) *DescribeStaleSecurityGroupsInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +type DescribeStaleSecurityGroupsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use when requesting the next set of items. If there are no additional
    +	// items to return, the string is empty.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Information about the stale security groups.
    +	StaleSecurityGroupSet []*StaleSecurityGroup `locationName:"staleSecurityGroupSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeStaleSecurityGroupsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeStaleSecurityGroupsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeStaleSecurityGroupsOutput) SetNextToken(v string) *DescribeStaleSecurityGroupsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetStaleSecurityGroupSet sets the StaleSecurityGroupSet field's value.
    +func (s *DescribeStaleSecurityGroupsOutput) SetStaleSecurityGroupSet(v []*StaleSecurityGroup) *DescribeStaleSecurityGroupsOutput {
    +	s.StaleSecurityGroupSet = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeSubnets.
    +type DescribeSubnetsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * availabilityZone - The Availability Zone for the subnet. You can also
    +	//    use availability-zone as the filter name.
    +	//
    +	//    * available-ip-address-count - The number of IP addresses in the subnet
    +	//    that are available.
    +	//
    +	//    * cidrBlock - The CIDR block of the subnet. The CIDR block you specify
    +	//    must exactly match the subnet's CIDR block for information to be returned
    +	//    for the subnet. You can also use cidr or cidr-block as the filter names.
    +	//
    +	//    * defaultForAz - Indicates whether this is the default subnet for the
    +	//    Availability Zone. You can also use default-for-az as the filter name.
    +	//
    +	//    * state - The state of the subnet (pending | available).
    +	//
    +	//    * subnet-id - The ID of the subnet.
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * vpc-id - The ID of the VPC for the subnet.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// One or more subnet IDs.
    +	//
    +	// Default: Describes all your subnets.
    +	SubnetIds []*string `locationName:"SubnetId" locationNameList:"SubnetId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSubnetsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSubnetsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeSubnetsInput) SetDryRun(v bool) *DescribeSubnetsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeSubnetsInput) SetFilters(v []*Filter) *DescribeSubnetsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetSubnetIds sets the SubnetIds field's value.
    +func (s *DescribeSubnetsInput) SetSubnetIds(v []*string) *DescribeSubnetsInput {
    +	s.SubnetIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeSubnets.
    +type DescribeSubnetsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more subnets.
    +	Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeSubnetsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeSubnetsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSubnets sets the Subnets field's value.
    +func (s *DescribeSubnetsOutput) SetSubnets(v []*Subnet) *DescribeSubnetsOutput {
    +	s.Subnets = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeTags.
    +type DescribeTagsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * key - The tag key.
    +	//
    +	//    * resource-id - The resource ID.
    +	//
    +	//    * resource-type - The resource type (customer-gateway | dhcp-options |
    +	//    image | instance | internet-gateway | network-acl | network-interface
    +	//    | reserved-instances | route-table | security-group | snapshot | spot-instances-request
    +	//    | subnet | volume | vpc | vpn-connection | vpn-gateway).
    +	//
    +	//    * value - The tag value.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of results to return in a single call. This value can
    +	// be between 5 and 1000. To retrieve the remaining results, make another call
    +	// with the returned NextToken value.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The token to retrieve the next page of results.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeTagsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeTagsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeTagsInput) SetDryRun(v bool) *DescribeTagsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeTagsInput) SetFilters(v []*Filter) *DescribeTagsInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeTagsInput) SetMaxResults(v int64) *DescribeTagsInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeTagsInput) SetNextToken(v string) *DescribeTagsInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeTags.
    +type DescribeTagsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// A list of tags.
    +	Tags []*TagDescription `locationName:"tagSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeTagsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeTagsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeTagsOutput) SetNextToken(v string) *DescribeTagsOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *DescribeTagsOutput) SetTags(v []*TagDescription) *DescribeTagsOutput {
    +	s.Tags = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeVolumeAttribute.
    +type DescribeVolumeAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attribute of the volume.
    +	Attribute *string `type:"string" enum:"VolumeAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the volume.
    +	//
    +	// VolumeId is a required field
    +	VolumeId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVolumeAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVolumeAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeVolumeAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeVolumeAttributeInput"}
    +	if s.VolumeId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VolumeId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *DescribeVolumeAttributeInput) SetAttribute(v string) *DescribeVolumeAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeVolumeAttributeInput) SetDryRun(v bool) *DescribeVolumeAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *DescribeVolumeAttributeInput) SetVolumeId(v string) *DescribeVolumeAttributeInput {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeVolumeAttribute.
    +type DescribeVolumeAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The state of autoEnableIO attribute.
    +	AutoEnableIO *AttributeBooleanValue `locationName:"autoEnableIO" type:"structure"`
    +
    +	// A list of product codes.
    +	ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
    +
    +	// The ID of the volume.
    +	VolumeId *string `locationName:"volumeId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVolumeAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVolumeAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAutoEnableIO sets the AutoEnableIO field's value.
    +func (s *DescribeVolumeAttributeOutput) SetAutoEnableIO(v *AttributeBooleanValue) *DescribeVolumeAttributeOutput {
    +	s.AutoEnableIO = v
    +	return s
    +}
    +
    +// SetProductCodes sets the ProductCodes field's value.
    +func (s *DescribeVolumeAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeVolumeAttributeOutput {
    +	s.ProductCodes = v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *DescribeVolumeAttributeOutput) SetVolumeId(v string) *DescribeVolumeAttributeOutput {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeVolumeStatus.
    +type DescribeVolumeStatusInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * action.code - The action code for the event (for example, enable-volume-io).
    +	//
    +	//    * action.description - A description of the action.
    +	//
    +	//    * action.event-id - The event ID associated with the action.
    +	//
    +	//    * availability-zone - The Availability Zone of the instance.
    +	//
    +	//    * event.description - A description of the event.
    +	//
    +	//    * event.event-id - The event ID.
    +	//
    +	//    * event.event-type - The event type (for io-enabled: passed | failed;
    +	//    for io-performance: io-performance:degraded | io-performance:severely-degraded
    +	//    | io-performance:stalled).
    +	//
    +	//    * event.not-after - The latest end time for the event.
    +	//
    +	//    * event.not-before - The earliest start time for the event.
    +	//
    +	//    * volume-status.details-name - The cause for volume-status.status (io-enabled
    +	//    | io-performance).
    +	//
    +	//    * volume-status.details-status - The status of volume-status.details-name
    +	//    (for io-enabled: passed | failed; for io-performance: normal | degraded
    +	//    | severely-degraded | stalled).
    +	//
    +	//    * volume-status.status - The status of the volume (ok | impaired | warning
    +	//    | insufficient-data).
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of volume results returned by DescribeVolumeStatus in
    +	// paginated output. When this parameter is used, the request only returns MaxResults
    +	// results in a single page along with a NextToken response element. The remaining
    +	// results of the initial request can be seen by sending another request with
    +	// the returned NextToken value. This value can be between 5 and 1000; if MaxResults
    +	// is given a value larger than 1000, only 1000 results are returned. If this
    +	// parameter is not used, then DescribeVolumeStatus returns all results. You
    +	// cannot specify this parameter and the volume IDs parameter in the same request.
    +	MaxResults *int64 `type:"integer"`
    +
    +	// The NextToken value to include in a future DescribeVolumeStatus request.
    +	// When the results of the request exceed MaxResults, this value can be used
    +	// to retrieve the next page of results. This value is null when there are no
    +	// more results to return.
    +	NextToken *string `type:"string"`
    +
    +	// One or more volume IDs.
    +	//
    +	// Default: Describes all your volumes.
    +	VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVolumeStatusInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVolumeStatusInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeVolumeStatusInput) SetDryRun(v bool) *DescribeVolumeStatusInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeVolumeStatusInput) SetFilters(v []*Filter) *DescribeVolumeStatusInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeVolumeStatusInput) SetMaxResults(v int64) *DescribeVolumeStatusInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeVolumeStatusInput) SetNextToken(v string) *DescribeVolumeStatusInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetVolumeIds sets the VolumeIds field's value.
    +func (s *DescribeVolumeStatusInput) SetVolumeIds(v []*string) *DescribeVolumeStatusInput {
    +	s.VolumeIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeVolumeStatus.
    +type DescribeVolumeStatusOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The token to use to retrieve the next page of results. This value is null
    +	// when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// A list of volumes.
    +	VolumeStatuses []*VolumeStatusItem `locationName:"volumeStatusSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVolumeStatusOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVolumeStatusOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeVolumeStatusOutput) SetNextToken(v string) *DescribeVolumeStatusOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetVolumeStatuses sets the VolumeStatuses field's value.
    +func (s *DescribeVolumeStatusOutput) SetVolumeStatuses(v []*VolumeStatusItem) *DescribeVolumeStatusOutput {
    +	s.VolumeStatuses = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeVolumes.
    +type DescribeVolumesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more filters.
    +	//
    +	//    * attachment.attach-time - The time stamp when the attachment initiated.
    +	//
    +	//    * attachment.delete-on-termination - Whether the volume is deleted on
    +	//    instance termination.
    +	//
    +	//    * attachment.device - The device name that is exposed to the instance
    +	//    (for example, /dev/sda1).
    +	//
    +	//    * attachment.instance-id - The ID of the instance the volume is attached
    +	//    to.
    +	//
    +	//    * attachment.status - The attachment state (attaching | attached | detaching
    +	//    | detached).
    +	//
    +	//    * availability-zone - The Availability Zone in which the volume was created.
    +	//
    +	//    * create-time - The time stamp when the volume was created.
    +	//
    +	//    * encrypted - The encryption status of the volume.
    +	//
    +	//    * size - The size of the volume, in GiB.
    +	//
    +	//    * snapshot-id - The snapshot from which the volume was created.
    +	//
    +	//    * status - The status of the volume (creating | available | in-use | deleting
    +	//    | deleted | error).
    +	//
    +	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
    +	//
    +	//    * tag-key - The key of a tag assigned to the resource. This filter is
    +	//    independent of the tag-value filter. For example, if you use both the
    +	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
    +	//    assigned both the tag key Purpose (regardless of what the tag's value
    +	//    is), and the tag value X (regardless of what the tag's key is). If you
    +	//    want to list only resources where Purpose is X, see the tag:key=value
    +	//    filter.
    +	//
    +	//    * tag-value - The value of a tag assigned to the resource. This filter
    +	//    is independent of the tag-key filter.
    +	//
    +	//    * volume-id - The volume ID.
    +	//
    +	//    * volume-type - The Amazon EBS volume type. This can be gp2 for General
    +	//    Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized
    +	//    HDD, sc1 for Cold HDD, or standard for Magnetic volumes.
    +	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
    +
    +	// The maximum number of volume results returned by DescribeVolumes in paginated
    +	// output. When this parameter is used, DescribeVolumes only returns MaxResults
    +	// results in a single page along with a NextToken response element. The remaining
    +	// results of the initial request can be seen by sending another DescribeVolumes
    +	// request with the returned NextToken value. This value can be between 5 and
    +	// 1000; if MaxResults is given a value larger than 1000, only 1000 results
    +	// are returned. If this parameter is not used, then DescribeVolumes returns
    +	// all results. You cannot specify this parameter and the volume IDs parameter
    +	// in the same request.
    +	MaxResults *int64 `locationName:"maxResults" type:"integer"`
    +
    +	// The NextToken value returned from a previous paginated DescribeVolumes request
    +	// where MaxResults was used and the results exceeded the value of that parameter.
    +	// Pagination continues from the end of the previous results that returned the
    +	// NextToken value. This value is null when there are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// One or more volume IDs.
    +	VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVolumesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVolumesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeVolumesInput) SetDryRun(v bool) *DescribeVolumesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFilters sets the Filters field's value.
    +func (s *DescribeVolumesInput) SetFilters(v []*Filter) *DescribeVolumesInput {
    +	s.Filters = v
    +	return s
    +}
    +
    +// SetMaxResults sets the MaxResults field's value.
    +func (s *DescribeVolumesInput) SetMaxResults(v int64) *DescribeVolumesInput {
    +	s.MaxResults = &v
    +	return s
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeVolumesInput) SetNextToken(v string) *DescribeVolumesInput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetVolumeIds sets the VolumeIds field's value.
    +func (s *DescribeVolumesInput) SetVolumeIds(v []*string) *DescribeVolumesInput {
    +	s.VolumeIds = v
    +	return s
    +}
    +
    +// Contains the output of DescribeVolumes.
    +type DescribeVolumesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The NextToken value to include in a future DescribeVolumes request. When
    +	// the results of a DescribeVolumes request exceed MaxResults, this value can
    +	// be used to retrieve the next page of results. This value is null when there
    +	// are no more results to return.
    +	NextToken *string `locationName:"nextToken" type:"string"`
    +
    +	// Information about the volumes.
    +	Volumes []*Volume `locationName:"volumeSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVolumesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVolumesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNextToken sets the NextToken field's value.
    +func (s *DescribeVolumesOutput) SetNextToken(v string) *DescribeVolumesOutput {
    +	s.NextToken = &v
    +	return s
    +}
    +
    +// SetVolumes sets the Volumes field's value.
    +func (s *DescribeVolumesOutput) SetVolumes(v []*Volume) *DescribeVolumesOutput {
    +	s.Volumes = v
    +	return s
    +}
    +
    +// Contains the parameters for DescribeVpcAttribute.
    +type DescribeVpcAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The VPC attribute.
    +	//
    +	// Attribute is a required field
    +	Attribute *string `type:"string" required:"true" enum:"VpcAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVpcAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVpcAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DescribeVpcAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DescribeVpcAttributeInput"}
    +	if s.Attribute == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Attribute"))
    +	}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *DescribeVpcAttributeInput) SetAttribute(v string) *DescribeVpcAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DescribeVpcAttributeInput) SetDryRun(v bool) *DescribeVpcAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DescribeVpcAttributeInput) SetVpcId(v string) *DescribeVpcAttributeInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Contains the output of DescribeVpcAttribute.
    +type DescribeVpcAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the instances launched in the VPC get DNS hostnames. If
    +	// this attribute is true, instances in the VPC get DNS hostnames; otherwise,
    +	// they do not.
    +	EnableDnsHostnames *AttributeBooleanValue `locationName:"enableDnsHostnames" type:"structure"`
    +
    +	// Indicates whether DNS resolution is enabled for the VPC. If this attribute
    +	// is true, the Amazon DNS server resolves DNS hostnames for your instances
    +	// to their corresponding IP addresses; otherwise, it does not.
    +	EnableDnsSupport *AttributeBooleanValue `locationName:"enableDnsSupport" type:"structure"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DescribeVpcAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DescribeVpcAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetEnableDnsHostnames sets the EnableDnsHostnames field's value.
    +func (s *DescribeVpcAttributeOutput) SetEnableDnsHostnames(v *AttributeBooleanValue) *DescribeVpcAttributeOutput {
    +	s.EnableDnsHostnames = v
    +	return s
    +}
    +
    +// SetEnableDnsSupport sets the EnableDnsSupport field's value.
    +func (s *DescribeVpcAttributeOutput) SetEnableDnsSupport(v *AttributeBooleanValue) *DescribeVpcAttributeOutput {
    +	s.EnableDnsSupport = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DescribeVpcAttributeOutput) SetVpcId(v string) *DescribeVpcAttributeOutput {
    +	s.VpcId = &v
    +	return s
    +}
    +
+// Contains the parameters for DescribeVpcClassicLinkDnsSupport.
+type DescribeVpcClassicLinkDnsSupportInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of items to return for this request. The request returns
+	// a token that you can specify in a subsequent call to get the next set of
+	// results.
+	MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a prior call.)
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// One or more VPC IDs.
+	//
+	// NOTE(review): unlike DescribeVpcClassicLinkInput.VpcIds, this field carries
+	// no locationName tag; presumably that matches the EC2 API model — confirm
+	// against the code generator before changing.
+	VpcIds []*string `locationNameList:"VpcId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcClassicLinkDnsSupportInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcClassicLinkDnsSupportInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeVpcClassicLinkDnsSupportInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeVpcClassicLinkDnsSupportInput"}
+	// The minimums enforced here mirror the min:"5" and min:"1" struct tags above.
+	if s.MaxResults != nil && *s.MaxResults < 5 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVpcClassicLinkDnsSupportInput) SetMaxResults(v int64) *DescribeVpcClassicLinkDnsSupportInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVpcClassicLinkDnsSupportInput) SetNextToken(v string) *DescribeVpcClassicLinkDnsSupportInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetVpcIds sets the VpcIds field's value.
+func (s *DescribeVpcClassicLinkDnsSupportInput) SetVpcIds(v []*string) *DescribeVpcClassicLinkDnsSupportInput {
+	s.VpcIds = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpcClassicLinkDnsSupport.
+type DescribeVpcClassicLinkDnsSupportOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use when requesting the next set of items.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// Information about the ClassicLink DNS support status of the VPCs.
+	Vpcs []*ClassicLinkDnsSupport `locationName:"vpcs" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcClassicLinkDnsSupportOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcClassicLinkDnsSupportOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVpcClassicLinkDnsSupportOutput) SetNextToken(v string) *DescribeVpcClassicLinkDnsSupportOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetVpcs sets the Vpcs field's value.
+func (s *DescribeVpcClassicLinkDnsSupportOutput) SetVpcs(v []*ClassicLinkDnsSupport) *DescribeVpcClassicLinkDnsSupportOutput {
+	s.Vpcs = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the parameters for DescribeVpcClassicLink.
+type DescribeVpcClassicLinkInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// One or more filters.
+	//
+	//    * is-classic-link-enabled - Whether the VPC is enabled for ClassicLink
+	//    (true | false).
+	//
+	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
+	//
+	//    * tag-key - The key of a tag assigned to the resource. This filter is
+	//    independent of the tag-value filter. For example, if you use both the
+	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
+	//    assigned both the tag key Purpose (regardless of what the tag's value
+	//    is), and the tag value X (regardless of what the tag's key is). If you
+	//    want to list only resources where Purpose is X, see the tag:key=value
+	//    filter.
+	//
+	//    * tag-value - The value of a tag assigned to the resource. This filter
+	//    is independent of the tag-key filter.
+	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+	// One or more VPCs for which you want to describe the ClassicLink status.
+	VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcClassicLinkInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcClassicLinkInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVpcClassicLinkInput) SetDryRun(v bool) *DescribeVpcClassicLinkInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVpcClassicLinkInput) SetFilters(v []*Filter) *DescribeVpcClassicLinkInput {
+	s.Filters = v
+	return s
+}
+
+// SetVpcIds sets the VpcIds field's value.
+func (s *DescribeVpcClassicLinkInput) SetVpcIds(v []*string) *DescribeVpcClassicLinkInput {
+	s.VpcIds = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpcClassicLink.
+type DescribeVpcClassicLinkOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ClassicLink status of one or more VPCs.
+	Vpcs []*VpcClassicLink `locationName:"vpcSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcClassicLinkOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcClassicLinkOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpcs sets the Vpcs field's value.
+func (s *DescribeVpcClassicLinkOutput) SetVpcs(v []*VpcClassicLink) *DescribeVpcClassicLinkOutput {
+	s.Vpcs = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing. The MaxResults cap of 1000 is enforced
+// server-side, which is presumably why there is no client-side Validate here.
+
+// Contains the parameters for DescribeVpcEndpointServices.
+type DescribeVpcEndpointServicesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// The maximum number of items to return for this request. The request returns
+	// a token that you can specify in a subsequent call to get the next set of
+	// results.
+	//
+	// Constraint: If the value is greater than 1000, we return only 1000 items.
+	MaxResults *int64 `type:"integer"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a prior call.)
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeVpcEndpointServicesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcEndpointServicesInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVpcEndpointServicesInput) SetDryRun(v bool) *DescribeVpcEndpointServicesInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVpcEndpointServicesInput) SetMaxResults(v int64) *DescribeVpcEndpointServicesInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVpcEndpointServicesInput) SetNextToken(v string) *DescribeVpcEndpointServicesInput {
+	s.NextToken = &v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpcEndpointServices.
+type DescribeVpcEndpointServicesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use when requesting the next set of items. If there are no additional
+	// items to return, the string is empty.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// A list of supported AWS services.
+	ServiceNames []*string `locationName:"serviceNameSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcEndpointServicesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcEndpointServicesOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVpcEndpointServicesOutput) SetNextToken(v string) *DescribeVpcEndpointServicesOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetServiceNames sets the ServiceNames field's value.
+func (s *DescribeVpcEndpointServicesOutput) SetServiceNames(v []*string) *DescribeVpcEndpointServicesOutput {
+	s.ServiceNames = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the parameters for DescribeVpcEndpoints.
+type DescribeVpcEndpointsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// One or more filters.
+	//
+	//    * service-name: The name of the AWS service.
+	//
+	//    * vpc-id: The ID of the VPC in which the endpoint resides.
+	//
+	//    * vpc-endpoint-id: The ID of the endpoint.
+	//
+	//    * vpc-endpoint-state: The state of the endpoint. (pending | available
+	//    | deleting | deleted)
+	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+	// The maximum number of items to return for this request. The request returns
+	// a token that you can specify in a subsequent call to get the next set of
+	// results.
+	//
+	// Constraint: If the value is greater than 1000, we return only 1000 items.
+	MaxResults *int64 `type:"integer"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a prior call.)
+	NextToken *string `type:"string"`
+
+	// One or more endpoint IDs.
+	VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcEndpointsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcEndpointsInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVpcEndpointsInput) SetDryRun(v bool) *DescribeVpcEndpointsInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVpcEndpointsInput) SetFilters(v []*Filter) *DescribeVpcEndpointsInput {
+	s.Filters = v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeVpcEndpointsInput) SetMaxResults(v int64) *DescribeVpcEndpointsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVpcEndpointsInput) SetNextToken(v string) *DescribeVpcEndpointsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetVpcEndpointIds sets the VpcEndpointIds field's value.
+func (s *DescribeVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DescribeVpcEndpointsInput {
+	s.VpcEndpointIds = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpcEndpoints.
+type DescribeVpcEndpointsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use when requesting the next set of items. If there are no additional
+	// items to return, the string is empty.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// Information about the endpoints.
+	VpcEndpoints []*VpcEndpoint `locationName:"vpcEndpointSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcEndpointsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcEndpointsOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeVpcEndpointsOutput) SetNextToken(v string) *DescribeVpcEndpointsOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetVpcEndpoints sets the VpcEndpoints field's value.
+func (s *DescribeVpcEndpointsOutput) SetVpcEndpoints(v []*VpcEndpoint) *DescribeVpcEndpointsOutput {
+	s.VpcEndpoints = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the parameters for DescribeVpcPeeringConnections.
+type DescribeVpcPeeringConnectionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// One or more filters.
+	//
+	//    * accepter-vpc-info.cidr-block - The CIDR block of the peer VPC.
+	//
+	//    * accepter-vpc-info.owner-id - The AWS account ID of the owner of the
+	//    peer VPC.
+	//
+	//    * accepter-vpc-info.vpc-id - The ID of the peer VPC.
+	//
+	//    * expiration-time - The expiration date and time for the VPC peering connection.
+	//
+	//    * requester-vpc-info.cidr-block - The CIDR block of the requester's VPC.
+	//
+	//    * requester-vpc-info.owner-id - The AWS account ID of the owner of the
+	//    requester VPC.
+	//
+	//    * requester-vpc-info.vpc-id - The ID of the requester VPC.
+	//
+	//    * status-code - The status of the VPC peering connection (pending-acceptance
+	//    | failed | expired | provisioning | active | deleted | rejected).
+	//
+	//    * status-message - A message that provides more information about the
+	//    status of the VPC peering connection, if applicable.
+	//
+	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
+	//
+	//    * tag-key - The key of a tag assigned to the resource. This filter is
+	//    independent of the tag-value filter. For example, if you use both the
+	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
+	//    assigned both the tag key Purpose (regardless of what the tag's value
+	//    is), and the tag value X (regardless of what the tag's key is). If you
+	//    want to list only resources where Purpose is X, see the tag:key=value
+	//    filter.
+	//
+	//    * tag-value - The value of a tag assigned to the resource. This filter
+	//    is independent of the tag-key filter.
+	//
+	//    * vpc-peering-connection-id - The ID of the VPC peering connection.
+	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+	// One or more VPC peering connection IDs.
+	//
+	// Default: Describes all your VPC peering connections.
+	VpcPeeringConnectionIds []*string `locationName:"VpcPeeringConnectionId" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcPeeringConnectionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcPeeringConnectionsInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVpcPeeringConnectionsInput) SetDryRun(v bool) *DescribeVpcPeeringConnectionsInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVpcPeeringConnectionsInput) SetFilters(v []*Filter) *DescribeVpcPeeringConnectionsInput {
+	s.Filters = v
+	return s
+}
+
+// SetVpcPeeringConnectionIds sets the VpcPeeringConnectionIds field's value.
+func (s *DescribeVpcPeeringConnectionsInput) SetVpcPeeringConnectionIds(v []*string) *DescribeVpcPeeringConnectionsInput {
+	s.VpcPeeringConnectionIds = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpcPeeringConnections.
+type DescribeVpcPeeringConnectionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the VPC peering connections.
+	VpcPeeringConnections []*VpcPeeringConnection `locationName:"vpcPeeringConnectionSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcPeeringConnectionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcPeeringConnectionsOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpcPeeringConnections sets the VpcPeeringConnections field's value.
+func (s *DescribeVpcPeeringConnectionsOutput) SetVpcPeeringConnections(v []*VpcPeeringConnection) *DescribeVpcPeeringConnectionsOutput {
+	s.VpcPeeringConnections = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the parameters for DescribeVpcs.
+type DescribeVpcsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// One or more filters.
+	//
+	//    * cidr - The CIDR block of the VPC. The CIDR block you specify must exactly
+	//    match the VPC's CIDR block for information to be returned for the VPC.
+	//    Must contain the slash followed by one or two digits (for example, /28).
+	//
+	//    * dhcp-options-id - The ID of a set of DHCP options.
+	//
+	//    * isDefault - Indicates whether the VPC is the default VPC.
+	//
+	//    * state - The state of the VPC (pending | available).
+	//
+	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
+	//
+	//    * tag-key - The key of a tag assigned to the resource. This filter is
+	//    independent of the tag-value filter. For example, if you use both the
+	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
+	//    assigned both the tag key Purpose (regardless of what the tag's value
+	//    is), and the tag value X (regardless of what the tag's key is). If you
+	//    want to list only resources where Purpose is X, see the tag:key=value
+	//    filter.
+	//
+	//    * tag-value - The value of a tag assigned to the resource. This filter
+	//    is independent of the tag-key filter.
+	//
+	//    * vpc-id - The ID of the VPC.
+	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+	// One or more VPC IDs.
+	//
+	// Default: Describes all your VPCs.
+	VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcsInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVpcsInput) SetDryRun(v bool) *DescribeVpcsInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVpcsInput) SetFilters(v []*Filter) *DescribeVpcsInput {
+	s.Filters = v
+	return s
+}
+
+// SetVpcIds sets the VpcIds field's value.
+func (s *DescribeVpcsInput) SetVpcIds(v []*string) *DescribeVpcsInput {
+	s.VpcIds = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpcs.
+type DescribeVpcsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about one or more VPCs.
+	Vpcs []*Vpc `locationName:"vpcSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpcsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpcsOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpcs sets the Vpcs field's value.
+func (s *DescribeVpcsOutput) SetVpcs(v []*Vpc) *DescribeVpcsOutput {
+	s.Vpcs = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the parameters for DescribeVpnConnections.
+type DescribeVpnConnectionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// One or more filters.
+	//
+	//    * customer-gateway-configuration - The configuration information for the
+	//    customer gateway.
+	//
+	//    * customer-gateway-id - The ID of a customer gateway associated with the
+	//    VPN connection.
+	//
+	//    * state - The state of the VPN connection (pending | available | deleting
+	//    | deleted).
+	//
+	//    * option.static-routes-only - Indicates whether the connection has static
+	//    routes only. Used for devices that do not support Border Gateway Protocol
+	//    (BGP).
+	//
+	//    * route.destination-cidr-block - The destination CIDR block. This corresponds
+	//    to the subnet used in a customer data center.
+	//
+	//    * bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP
+	//    device.
+	//
+	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
+	//
+	//    * tag-key - The key of a tag assigned to the resource. This filter is
+	//    independent of the tag-value filter. For example, if you use both the
+	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
+	//    assigned both the tag key Purpose (regardless of what the tag's value
+	//    is), and the tag value X (regardless of what the tag's key is). If you
+	//    want to list only resources where Purpose is X, see the tag:key=value
+	//    filter.
+	//
+	//    * tag-value - The value of a tag assigned to the resource. This filter
+	//    is independent of the tag-key filter.
+	//
+	//    * type - The type of VPN connection. Currently the only supported type
+	//    is ipsec.1.
+	//
+	//    * vpn-connection-id - The ID of the VPN connection.
+	//
+	//    * vpn-gateway-id - The ID of a virtual private gateway associated with
+	//    the VPN connection.
+	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+	// One or more VPN connection IDs.
+	//
+	// Default: Describes your VPN connections.
+	VpnConnectionIds []*string `locationName:"VpnConnectionId" locationNameList:"VpnConnectionId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpnConnectionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpnConnectionsInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVpnConnectionsInput) SetDryRun(v bool) *DescribeVpnConnectionsInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVpnConnectionsInput) SetFilters(v []*Filter) *DescribeVpnConnectionsInput {
+	s.Filters = v
+	return s
+}
+
+// SetVpnConnectionIds sets the VpnConnectionIds field's value.
+func (s *DescribeVpnConnectionsInput) SetVpnConnectionIds(v []*string) *DescribeVpnConnectionsInput {
+	s.VpnConnectionIds = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpnConnections.
+type DescribeVpnConnectionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about one or more VPN connections.
+	VpnConnections []*VpnConnection `locationName:"vpnConnectionSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpnConnectionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpnConnectionsOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpnConnections sets the VpnConnections field's value.
+func (s *DescribeVpnConnectionsOutput) SetVpnConnections(v []*VpnConnection) *DescribeVpnConnectionsOutput {
+	s.VpnConnections = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the parameters for DescribeVpnGateways.
+type DescribeVpnGatewaysInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// One or more filters.
+	//
+	//    * attachment.state - The current state of the attachment between the gateway
+	//    and the VPC (attaching | attached | detaching | detached).
+	//
+	//    * attachment.vpc-id - The ID of an attached VPC.
+	//
+	//    * availability-zone - The Availability Zone for the virtual private gateway
+	//    (if applicable).
+	//
+	//    * state - The state of the virtual private gateway (pending | available
+	//    | deleting | deleted).
+	//
+	//    * tag:key=value - The key/value combination of a tag assigned to the resource.
+	//
+	//    * tag-key - The key of a tag assigned to the resource. This filter is
+	//    independent of the tag-value filter. For example, if you use both the
+	//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
+	//    assigned both the tag key Purpose (regardless of what the tag's value
+	//    is), and the tag value X (regardless of what the tag's key is). If you
+	//    want to list only resources where Purpose is X, see the tag:key=value
+	//    filter.
+	//
+	//    * tag-value - The value of a tag assigned to the resource. This filter
+	//    is independent of the tag-key filter.
+	//
+	//    * type - The type of virtual private gateway. Currently the only supported
+	//    type is ipsec.1.
+	//
+	//    * vpn-gateway-id - The ID of the virtual private gateway.
+	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+	// One or more virtual private gateway IDs.
+	//
+	// Default: Describes all your virtual private gateways.
+	VpnGatewayIds []*string `locationName:"VpnGatewayId" locationNameList:"VpnGatewayId" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpnGatewaysInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpnGatewaysInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeVpnGatewaysInput) SetDryRun(v bool) *DescribeVpnGatewaysInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeVpnGatewaysInput) SetFilters(v []*Filter) *DescribeVpnGatewaysInput {
+	s.Filters = v
+	return s
+}
+
+// SetVpnGatewayIds sets the VpnGatewayIds field's value.
+func (s *DescribeVpnGatewaysInput) SetVpnGatewayIds(v []*string) *DescribeVpnGatewaysInput {
+	s.VpnGatewayIds = v
+	return s
+}
    +
+// NOTE(review): appears to be auto-generated AWS SDK code; prefer regenerating
+// from the API model over hand-editing.
+
+// Contains the output of DescribeVpnGateways.
+type DescribeVpnGatewaysOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about one or more virtual private gateways.
+	VpnGateways []*VpnGateway `locationName:"vpnGatewaySet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeVpnGatewaysOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeVpnGatewaysOutput) GoString() string {
+	return s.String()
+}
+
+// SetVpnGateways sets the VpnGateways field's value.
+func (s *DescribeVpnGatewaysOutput) SetVpnGateways(v []*VpnGateway) *DescribeVpnGatewaysOutput {
+	s.VpnGateways = v
+	return s
+}
    +
+// Contains the parameters for DetachClassicLinkVpc.
+type DetachClassicLinkVpcInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of the instance to unlink from the VPC.
+	//
+	// InstanceId is a required field
+	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
+
+	// The ID of the VPC to which the instance is linked.
+	//
+	// VpcId is a required field
+	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachClassicLinkVpcInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachClassicLinkVpcInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachClassicLinkVpcInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DetachClassicLinkVpcInput"}
+	// The required checks below mirror the required:"true" struct tags on
+	// InstanceId and VpcId.
+	if s.InstanceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+	}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DetachClassicLinkVpcInput) SetDryRun(v bool) *DetachClassicLinkVpcInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *DetachClassicLinkVpcInput) SetInstanceId(v string) *DetachClassicLinkVpcInput {
+	s.InstanceId = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *DetachClassicLinkVpcInput) SetVpcId(v string) *DetachClassicLinkVpcInput {
+	s.VpcId = &v
+	return s
+}
    +
    +// Contains the output of DetachClassicLinkVpc.
    +type DetachClassicLinkVpcOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Returns true if the request succeeds; otherwise, it returns an error.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DetachClassicLinkVpcOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachClassicLinkVpcOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *DetachClassicLinkVpcOutput) SetReturn(v bool) *DetachClassicLinkVpcOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for DetachInternetGateway.
    +type DetachInternetGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the Internet gateway.
    +	//
    +	// InternetGatewayId is a required field
    +	InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DetachInternetGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachInternetGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DetachInternetGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DetachInternetGatewayInput"}
    +	if s.InternetGatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InternetGatewayId"))
    +	}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DetachInternetGatewayInput) SetDryRun(v bool) *DetachInternetGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInternetGatewayId sets the InternetGatewayId field's value.
    +func (s *DetachInternetGatewayInput) SetInternetGatewayId(v string) *DetachInternetGatewayInput {
    +	s.InternetGatewayId = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DetachInternetGatewayInput) SetVpcId(v string) *DetachInternetGatewayInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +type DetachInternetGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DetachInternetGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachInternetGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DetachNetworkInterface.
    +type DetachNetworkInterfaceInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the attachment.
    +	//
    +	// AttachmentId is a required field
    +	AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Specifies whether to force a detachment.
    +	Force *bool `locationName:"force" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DetachNetworkInterfaceInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachNetworkInterfaceInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DetachNetworkInterfaceInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DetachNetworkInterfaceInput"}
    +	if s.AttachmentId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("AttachmentId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttachmentId sets the AttachmentId field's value.
    +func (s *DetachNetworkInterfaceInput) SetAttachmentId(v string) *DetachNetworkInterfaceInput {
    +	s.AttachmentId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DetachNetworkInterfaceInput) SetDryRun(v bool) *DetachNetworkInterfaceInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetForce sets the Force field's value.
    +func (s *DetachNetworkInterfaceInput) SetForce(v bool) *DetachNetworkInterfaceInput {
    +	s.Force = &v
    +	return s
    +}
    +
    +type DetachNetworkInterfaceOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DetachNetworkInterfaceOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachNetworkInterfaceOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DetachVolume.
    +type DetachVolumeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The device name.
    +	Device *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Forces detachment if the previous detachment attempt did not occur cleanly
    +	// (for example, logging into an instance, unmounting the volume, and detaching
    +	// normally). This option can lead to data loss or a corrupted file system.
    +	// Use this option only as a last resort to detach a volume from a failed instance.
    +	// The instance won't have an opportunity to flush file system caches or file
    +	// system metadata. If you use this option, you must perform file system check
    +	// and repair procedures.
    +	Force *bool `type:"boolean"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `type:"string"`
    +
    +	// The ID of the volume.
    +	//
    +	// VolumeId is a required field
    +	VolumeId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DetachVolumeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachVolumeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DetachVolumeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DetachVolumeInput"}
    +	if s.VolumeId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VolumeId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDevice sets the Device field's value.
    +func (s *DetachVolumeInput) SetDevice(v string) *DetachVolumeInput {
    +	s.Device = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DetachVolumeInput) SetDryRun(v bool) *DetachVolumeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetForce sets the Force field's value.
    +func (s *DetachVolumeInput) SetForce(v bool) *DetachVolumeInput {
    +	s.Force = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *DetachVolumeInput) SetInstanceId(v string) *DetachVolumeInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *DetachVolumeInput) SetVolumeId(v string) *DetachVolumeInput {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// Contains the parameters for DetachVpnGateway.
    +type DetachVpnGatewayInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `type:"string" required:"true"`
    +
    +	// The ID of the virtual private gateway.
    +	//
    +	// VpnGatewayId is a required field
    +	VpnGatewayId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DetachVpnGatewayInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachVpnGatewayInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DetachVpnGatewayInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DetachVpnGatewayInput"}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +	if s.VpnGatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpnGatewayId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DetachVpnGatewayInput) SetDryRun(v bool) *DetachVpnGatewayInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DetachVpnGatewayInput) SetVpcId(v string) *DetachVpnGatewayInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// SetVpnGatewayId sets the VpnGatewayId field's value.
    +func (s *DetachVpnGatewayInput) SetVpnGatewayId(v string) *DetachVpnGatewayInput {
    +	s.VpnGatewayId = &v
    +	return s
    +}
    +
    +type DetachVpnGatewayOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DetachVpnGatewayOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DetachVpnGatewayOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Describes a DHCP configuration option.
    +type DhcpConfiguration struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of a DHCP option.
    +	Key *string `locationName:"key" type:"string"`
    +
    +	// One or more values for the DHCP option.
    +	Values []*AttributeValue `locationName:"valueSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DhcpConfiguration) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DhcpConfiguration) GoString() string {
    +	return s.String()
    +}
    +
    +// SetKey sets the Key field's value.
    +func (s *DhcpConfiguration) SetKey(v string) *DhcpConfiguration {
    +	s.Key = &v
    +	return s
    +}
    +
    +// SetValues sets the Values field's value.
    +func (s *DhcpConfiguration) SetValues(v []*AttributeValue) *DhcpConfiguration {
    +	s.Values = v
    +	return s
    +}
    +
    +// Describes a set of DHCP options.
    +type DhcpOptions struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more DHCP options in the set.
    +	DhcpConfigurations []*DhcpConfiguration `locationName:"dhcpConfigurationSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the set of DHCP options.
    +	DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"`
    +
    +	// Any tags assigned to the DHCP options set.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s DhcpOptions) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DhcpOptions) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDhcpConfigurations sets the DhcpConfigurations field's value.
    +func (s *DhcpOptions) SetDhcpConfigurations(v []*DhcpConfiguration) *DhcpOptions {
    +	s.DhcpConfigurations = v
    +	return s
    +}
    +
    +// SetDhcpOptionsId sets the DhcpOptionsId field's value.
    +func (s *DhcpOptions) SetDhcpOptionsId(v string) *DhcpOptions {
    +	s.DhcpOptionsId = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *DhcpOptions) SetTags(v []*Tag) *DhcpOptions {
    +	s.Tags = v
    +	return s
    +}
    +
    +// Contains the parameters for DisableVgwRoutePropagation.
    +type DisableVgwRoutePropagationInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the virtual private gateway.
    +	//
    +	// GatewayId is a required field
    +	GatewayId *string `type:"string" required:"true"`
    +
    +	// The ID of the route table.
    +	//
    +	// RouteTableId is a required field
    +	RouteTableId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DisableVgwRoutePropagationInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisableVgwRoutePropagationInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DisableVgwRoutePropagationInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DisableVgwRoutePropagationInput"}
    +	if s.GatewayId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("GatewayId"))
    +	}
    +	if s.RouteTableId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetGatewayId sets the GatewayId field's value.
    +func (s *DisableVgwRoutePropagationInput) SetGatewayId(v string) *DisableVgwRoutePropagationInput {
    +	s.GatewayId = &v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *DisableVgwRoutePropagationInput) SetRouteTableId(v string) *DisableVgwRoutePropagationInput {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +type DisableVgwRoutePropagationOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DisableVgwRoutePropagationOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisableVgwRoutePropagationOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DisableVpcClassicLinkDnsSupport.
    +type DisableVpcClassicLinkDnsSupportInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DisableVpcClassicLinkDnsSupportInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisableVpcClassicLinkDnsSupportInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DisableVpcClassicLinkDnsSupportInput) SetVpcId(v string) *DisableVpcClassicLinkDnsSupportInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Contains the output of DisableVpcClassicLinkDnsSupport.
    +type DisableVpcClassicLinkDnsSupportOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Returns true if the request succeeds; otherwise, it returns an error.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DisableVpcClassicLinkDnsSupportOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisableVpcClassicLinkDnsSupportOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *DisableVpcClassicLinkDnsSupportOutput) SetReturn(v bool) *DisableVpcClassicLinkDnsSupportOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for DisableVpcClassicLink.
    +type DisableVpcClassicLinkInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DisableVpcClassicLinkInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisableVpcClassicLinkInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DisableVpcClassicLinkInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DisableVpcClassicLinkInput"}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DisableVpcClassicLinkInput) SetDryRun(v bool) *DisableVpcClassicLinkInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *DisableVpcClassicLinkInput) SetVpcId(v string) *DisableVpcClassicLinkInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Contains the output of DisableVpcClassicLink.
    +type DisableVpcClassicLinkOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Returns true if the request succeeds; otherwise, it returns an error.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DisableVpcClassicLinkOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisableVpcClassicLinkOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *DisableVpcClassicLinkOutput) SetReturn(v bool) *DisableVpcClassicLinkOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for DisassociateAddress.
    +type DisassociateAddressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// [EC2-VPC] The association ID. Required for EC2-VPC.
    +	AssociationId *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// [EC2-Classic] The Elastic IP address. Required for EC2-Classic.
    +	PublicIp *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DisassociateAddressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisassociateAddressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *DisassociateAddressInput) SetAssociationId(v string) *DisassociateAddressInput {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DisassociateAddressInput) SetDryRun(v bool) *DisassociateAddressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *DisassociateAddressInput) SetPublicIp(v string) *DisassociateAddressInput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +type DisassociateAddressOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DisassociateAddressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisassociateAddressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for DisassociateRouteTable.
    +type DisassociateRouteTableInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The association ID representing the current association between the route
    +	// table and subnet.
    +	//
    +	// AssociationId is a required field
    +	AssociationId *string `locationName:"associationId" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s DisassociateRouteTableInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisassociateRouteTableInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DisassociateRouteTableInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DisassociateRouteTableInput"}
    +	if s.AssociationId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("AssociationId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *DisassociateRouteTableInput) SetAssociationId(v string) *DisassociateRouteTableInput {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *DisassociateRouteTableInput) SetDryRun(v bool) *DisassociateRouteTableInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +type DisassociateRouteTableOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DisassociateRouteTableOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DisassociateRouteTableOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Describes a disk image.
    +type DiskImage struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A description of the disk image.
    +	Description *string `type:"string"`
    +
    +	// Information about the disk image.
    +	Image *DiskImageDetail `type:"structure"`
    +
    +	// Information about the volume.
    +	Volume *VolumeDetail `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s DiskImage) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DiskImage) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DiskImage) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DiskImage"}
    +	if s.Image != nil {
    +		if err := s.Image.Validate(); err != nil {
    +			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
    +		}
    +	}
    +	if s.Volume != nil {
    +		if err := s.Volume.Validate(); err != nil {
    +			invalidParams.AddNested("Volume", err.(request.ErrInvalidParams))
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *DiskImage) SetDescription(v string) *DiskImage {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetImage sets the Image field's value.
    +func (s *DiskImage) SetImage(v *DiskImageDetail) *DiskImage {
    +	s.Image = v
    +	return s
    +}
    +
    +// SetVolume sets the Volume field's value.
    +func (s *DiskImage) SetVolume(v *VolumeDetail) *DiskImage {
    +	s.Volume = v
    +	return s
    +}
    +
    +// Describes a disk image.
    +type DiskImageDescription struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The checksum computed for the disk image.
    +	Checksum *string `locationName:"checksum" type:"string"`
    +
    +	// The disk image format.
    +	//
    +	// Format is a required field
    +	Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"`
    +
    +	// A presigned URL for the import manifest stored in Amazon S3. For information
    +	// about creating a presigned URL for an Amazon S3 object, read the "Query String
    +	// Request Authentication Alternative" section of the Authenticating REST Requests
    +	// (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
    +	// topic in the Amazon Simple Storage Service Developer Guide.
    +	//
    +	// For information about the import manifest referenced by this API action,
    +	// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
    +	//
    +	// ImportManifestUrl is a required field
    +	ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"`
    +
    +	// The size of the disk image, in GiB.
    +	//
    +	// Size is a required field
    +	Size *int64 `locationName:"size" type:"long" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s DiskImageDescription) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DiskImageDescription) GoString() string {
    +	return s.String()
    +}
    +
    +// SetChecksum sets the Checksum field's value.
    +func (s *DiskImageDescription) SetChecksum(v string) *DiskImageDescription {
    +	s.Checksum = &v
    +	return s
    +}
    +
    +// SetFormat sets the Format field's value.
    +func (s *DiskImageDescription) SetFormat(v string) *DiskImageDescription {
    +	s.Format = &v
    +	return s
    +}
    +
    +// SetImportManifestUrl sets the ImportManifestUrl field's value.
    +func (s *DiskImageDescription) SetImportManifestUrl(v string) *DiskImageDescription {
    +	s.ImportManifestUrl = &v
    +	return s
    +}
    +
    +// SetSize sets the Size field's value.
    +func (s *DiskImageDescription) SetSize(v int64) *DiskImageDescription {
    +	s.Size = &v
    +	return s
    +}
    +
    +// Describes a disk image.
    +type DiskImageDetail struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The size of the disk image, in GiB.
    +	//
    +	// Bytes is a required field
    +	Bytes *int64 `locationName:"bytes" type:"long" required:"true"`
    +
    +	// The disk image format.
    +	//
    +	// Format is a required field
    +	Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"`
    +
    +	// A presigned URL for the import manifest stored in Amazon S3 and presented
    +	// here as an Amazon S3 presigned URL. For information about creating a presigned
    +	// URL for an Amazon S3 object, read the "Query String Request Authentication
    +	// Alternative" section of the Authenticating REST Requests (http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
    +	// topic in the Amazon Simple Storage Service Developer Guide.
    +	//
    +	// For information about the import manifest referenced by this API action,
    +	// see VM Import Manifest (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html).
    +	//
    +	// ImportManifestUrl is a required field
    +	ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"`
    +}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit;
+// changes belong in the upstream code generator / SDK update.
+// String returns the string representation
+func (s DiskImageDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskImageDetail) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DiskImageDetail) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DiskImageDetail"}
+	if s.Bytes == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bytes"))
+	}
+	if s.Format == nil {
+		invalidParams.Add(request.NewErrParamRequired("Format"))
+	}
+	if s.ImportManifestUrl == nil {
+		invalidParams.Add(request.NewErrParamRequired("ImportManifestUrl"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBytes sets the Bytes field's value.
+// Setters take a value and store its address, returning s for call chaining.
+func (s *DiskImageDetail) SetBytes(v int64) *DiskImageDetail {
+	s.Bytes = &v
+	return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *DiskImageDetail) SetFormat(v string) *DiskImageDetail {
+	s.Format = &v
+	return s
+}
+
+// SetImportManifestUrl sets the ImportManifestUrl field's value.
+func (s *DiskImageDetail) SetImportManifestUrl(v string) *DiskImageDetail {
+	s.ImportManifestUrl = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes a disk image volume.
+type DiskImageVolumeDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The volume identifier.
+	//
+	// Id is a required field
+	Id *string `locationName:"id" type:"string" required:"true"`
+
+	// The size of the volume, in GiB.
+	Size *int64 `locationName:"size" type:"long"`
+}
+
+// String returns the string representation
+func (s DiskImageVolumeDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DiskImageVolumeDescription) GoString() string {
+	return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *DiskImageVolumeDescription) SetId(v string) *DiskImageVolumeDescription {
+	s.Id = &v
+	return s
+}
+
+// SetSize sets the Size field's value.
+func (s *DiskImageVolumeDescription) SetSize(v int64) *DiskImageVolumeDescription {
+	s.Size = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes a block device for an EBS volume.
+type EbsBlockDevice struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether the EBS volume is deleted on instance termination.
+	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+	// Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes
+	// may only be attached to instances that support Amazon EBS encryption.
+	Encrypted *bool `locationName:"encrypted" type:"boolean"`
+
+	// The number of I/O operations per second (IOPS) that the volume supports.
+	// For io1, this represents the number of IOPS that are provisioned for the
+	// volume. For gp2, this represents the baseline performance of the volume and
+	// the rate at which the volume accumulates I/O credits for bursting. For more
+	// information about General Purpose SSD baseline performance, I/O credits,
+	// and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
+	//
+	// Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for
+	// gp2 volumes.
+	//
+	// Condition: This parameter is required for requests to create io1 volumes;
+	// it is not used in requests to create gp2, st1, sc1, or standard volumes.
+	Iops *int64 `locationName:"iops" type:"integer"`
+
+	// The ID of the snapshot.
+	SnapshotId *string `locationName:"snapshotId" type:"string"`
+
+	// The size of the volume, in GiB.
+	//
+	// Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned
+	// IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for
+	// Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify
+	// a snapshot, the volume size must be equal to or larger than the snapshot
+	// size.
+	//
+	// Default: If you're creating the volume from a snapshot and don't specify
+	// a volume size, the default is the snapshot size.
+	VolumeSize *int64 `locationName:"volumeSize" type:"integer"`
+
+	// The volume type: gp2, io1, st1, sc1, or standard.
+	//
+	// Default: standard
+	VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"`
+}
+
+// String returns the string representation
+func (s EbsBlockDevice) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsBlockDevice) GoString() string {
+	return s.String()
+}
+
+// SetDeleteOnTermination sets the DeleteOnTermination field's value.
+func (s *EbsBlockDevice) SetDeleteOnTermination(v bool) *EbsBlockDevice {
+	s.DeleteOnTermination = &v
+	return s
+}
+
+// SetEncrypted sets the Encrypted field's value.
+func (s *EbsBlockDevice) SetEncrypted(v bool) *EbsBlockDevice {
+	s.Encrypted = &v
+	return s
+}
+
+// SetIops sets the Iops field's value.
+func (s *EbsBlockDevice) SetIops(v int64) *EbsBlockDevice {
+	s.Iops = &v
+	return s
+}
+
+// SetSnapshotId sets the SnapshotId field's value.
+func (s *EbsBlockDevice) SetSnapshotId(v string) *EbsBlockDevice {
+	s.SnapshotId = &v
+	return s
+}
+
+// SetVolumeSize sets the VolumeSize field's value.
+func (s *EbsBlockDevice) SetVolumeSize(v int64) *EbsBlockDevice {
+	s.VolumeSize = &v
+	return s
+}
+
+// SetVolumeType sets the VolumeType field's value.
+func (s *EbsBlockDevice) SetVolumeType(v string) *EbsBlockDevice {
+	s.VolumeType = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes a parameter used to set up an EBS volume in a block device mapping.
+type EbsInstanceBlockDevice struct {
+	_ struct{} `type:"structure"`
+
+	// The time stamp when the attachment initiated.
+	AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Indicates whether the volume is deleted on instance termination.
+	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+	// The attachment state.
+	Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"`
+
+	// The ID of the EBS volume.
+	VolumeId *string `locationName:"volumeId" type:"string"`
+}
+
+// String returns the string representation
+func (s EbsInstanceBlockDevice) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsInstanceBlockDevice) GoString() string {
+	return s.String()
+}
+
+// SetAttachTime sets the AttachTime field's value.
+func (s *EbsInstanceBlockDevice) SetAttachTime(v time.Time) *EbsInstanceBlockDevice {
+	s.AttachTime = &v
+	return s
+}
+
+// SetDeleteOnTermination sets the DeleteOnTermination field's value.
+func (s *EbsInstanceBlockDevice) SetDeleteOnTermination(v bool) *EbsInstanceBlockDevice {
+	s.DeleteOnTermination = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *EbsInstanceBlockDevice) SetStatus(v string) *EbsInstanceBlockDevice {
+	s.Status = &v
+	return s
+}
+
+// SetVolumeId sets the VolumeId field's value.
+func (s *EbsInstanceBlockDevice) SetVolumeId(v string) *EbsInstanceBlockDevice {
+	s.VolumeId = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes information used to set up an EBS volume specified in a block device
+// mapping.
+type EbsInstanceBlockDeviceSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether the volume is deleted on instance termination.
+	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+	// The ID of the EBS volume.
+	VolumeId *string `locationName:"volumeId" type:"string"`
+}
+
+// String returns the string representation
+func (s EbsInstanceBlockDeviceSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EbsInstanceBlockDeviceSpecification) GoString() string {
+	return s.String()
+}
+
+// SetDeleteOnTermination sets the DeleteOnTermination field's value.
+func (s *EbsInstanceBlockDeviceSpecification) SetDeleteOnTermination(v bool) *EbsInstanceBlockDeviceSpecification {
+	s.DeleteOnTermination = &v
+	return s
+}
+
+// SetVolumeId sets the VolumeId field's value.
+func (s *EbsInstanceBlockDeviceSpecification) SetVolumeId(v string) *EbsInstanceBlockDeviceSpecification {
+	s.VolumeId = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Contains the parameters for EnableVgwRoutePropagation.
+type EnableVgwRoutePropagationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the virtual private gateway.
+	//
+	// GatewayId is a required field
+	GatewayId *string `type:"string" required:"true"`
+
+	// The ID of the route table.
+	//
+	// RouteTableId is a required field
+	RouteTableId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableVgwRoutePropagationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVgwRoutePropagationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableVgwRoutePropagationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableVgwRoutePropagationInput"}
+	if s.GatewayId == nil {
+		invalidParams.Add(request.NewErrParamRequired("GatewayId"))
+	}
+	if s.RouteTableId == nil {
+		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetGatewayId sets the GatewayId field's value.
+func (s *EnableVgwRoutePropagationInput) SetGatewayId(v string) *EnableVgwRoutePropagationInput {
+	s.GatewayId = &v
+	return s
+}
+
+// SetRouteTableId sets the RouteTableId field's value.
+func (s *EnableVgwRoutePropagationInput) SetRouteTableId(v string) *EnableVgwRoutePropagationInput {
+	s.RouteTableId = &v
+	return s
+}
    +
+// Contains the output of EnableVgwRoutePropagation.
+type EnableVgwRoutePropagationOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableVgwRoutePropagationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVgwRoutePropagationOutput) GoString() string {
+	return s.String()
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Contains the parameters for EnableVolumeIO.
+type EnableVolumeIOInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of the volume.
+	//
+	// VolumeId is a required field
+	VolumeId *string `locationName:"volumeId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableVolumeIOInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVolumeIOInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableVolumeIOInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableVolumeIOInput"}
+	if s.VolumeId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VolumeId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *EnableVolumeIOInput) SetDryRun(v bool) *EnableVolumeIOInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetVolumeId sets the VolumeId field's value.
+func (s *EnableVolumeIOInput) SetVolumeId(v string) *EnableVolumeIOInput {
+	s.VolumeId = &v
+	return s
+}
    +
+// Contains the output of EnableVolumeIO.
+type EnableVolumeIOOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s EnableVolumeIOOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVolumeIOOutput) GoString() string {
+	return s.String()
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Contains the parameters for EnableVpcClassicLinkDnsSupport.
+type EnableVpcClassicLinkDnsSupportInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the VPC.
+	VpcId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EnableVpcClassicLinkDnsSupportInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVpcClassicLinkDnsSupportInput) GoString() string {
+	return s.String()
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *EnableVpcClassicLinkDnsSupportInput) SetVpcId(v string) *EnableVpcClassicLinkDnsSupportInput {
+	s.VpcId = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Contains the output of EnableVpcClassicLinkDnsSupport.
+type EnableVpcClassicLinkDnsSupportOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Returns true if the request succeeds; otherwise, it returns an error.
+	Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s EnableVpcClassicLinkDnsSupportOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVpcClassicLinkDnsSupportOutput) GoString() string {
+	return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *EnableVpcClassicLinkDnsSupportOutput) SetReturn(v bool) *EnableVpcClassicLinkDnsSupportOutput {
+	s.Return = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Contains the parameters for EnableVpcClassicLink.
+type EnableVpcClassicLinkInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of the VPC.
+	//
+	// VpcId is a required field
+	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableVpcClassicLinkInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVpcClassicLinkInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableVpcClassicLinkInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableVpcClassicLinkInput"}
+	if s.VpcId == nil {
+		invalidParams.Add(request.NewErrParamRequired("VpcId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *EnableVpcClassicLinkInput) SetDryRun(v bool) *EnableVpcClassicLinkInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *EnableVpcClassicLinkInput) SetVpcId(v string) *EnableVpcClassicLinkInput {
+	s.VpcId = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Contains the output of EnableVpcClassicLink.
+type EnableVpcClassicLinkOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Returns true if the request succeeds; otherwise, it returns an error.
+	Return *bool `locationName:"return" type:"boolean"`
+}
+
+// String returns the string representation
+func (s EnableVpcClassicLinkOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EnableVpcClassicLinkOutput) GoString() string {
+	return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *EnableVpcClassicLinkOutput) SetReturn(v bool) *EnableVpcClassicLinkOutput {
+	s.Return = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes a Spot fleet event.
+type EventInformation struct {
+	_ struct{} `type:"structure"`
+
+	// The description of the event.
+	EventDescription *string `locationName:"eventDescription" type:"string"`
+
+	// The event.
+	//
+	// The following are the error events.
+	//
+	//    * iamFleetRoleInvalid - The Spot fleet did not have the required permissions
+	//    either to launch or terminate an instance.
+	//
+	//    * launchSpecTemporarilyBlacklisted - The configuration is not valid and
+	//    several attempts to launch instances have failed. For more information,
+	//    see the description of the event.
+	//
+	//    * spotFleetRequestConfigurationInvalid - The configuration is not valid.
+	//    For more information, see the description of the event.
+	//
+	//    * spotInstanceCountLimitExceeded - You've reached the limit on the number
+	//    of Spot instances that you can launch.
+	//
+	// The following are the fleetRequestChange events.
+	//
+	//    * active - The Spot fleet has been validated and Amazon EC2 is attempting
+	//    to maintain the target number of running Spot instances.
+	//
+	//    * cancelled - The Spot fleet is canceled and has no running Spot instances.
+	//    The Spot fleet will be deleted two days after its instances were terminated.
+	//
+	//    * cancelled_running - The Spot fleet is canceled and will not launch additional
+	//    Spot instances, but its existing Spot instances continue to run until
+	//    they are interrupted or terminated.
+	//
+	//    * cancelled_terminating - The Spot fleet is canceled and its Spot instances
+	//    are terminating.
+	//
+	//    * expired - The Spot fleet request has expired. A subsequent event indicates
+	//    that the instances were terminated, if the request was created with TerminateInstancesWithExpiration
+	//    set.
+	//
+	//    * modify_in_progress - A request to modify the Spot fleet request was
+	//    accepted and is in progress.
+	//
+	//    * modify_successful - The Spot fleet request was modified.
+	//
+	//    * price_update - The bid price for a launch configuration was adjusted
+	//    because it was too high. This change is permanent.
+	//
+	//    * submitted - The Spot fleet request is being evaluated and Amazon EC2
+	//    is preparing to launch the target number of Spot instances.
+	//
+	// The following are the instanceChange events.
+	//
+	//    * launched - A bid was fulfilled and a new instance was launched.
+	//
+	//    * terminated - An instance was terminated by the user.
+	EventSubType *string `locationName:"eventSubType" type:"string"`
+
+	// The ID of the instance. This information is available only for instanceChange
+	// events.
+	InstanceId *string `locationName:"instanceId" type:"string"`
+}
+
+// String returns the string representation
+func (s EventInformation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EventInformation) GoString() string {
+	return s.String()
+}
+
+// SetEventDescription sets the EventDescription field's value.
+func (s *EventInformation) SetEventDescription(v string) *EventInformation {
+	s.EventDescription = &v
+	return s
+}
+
+// SetEventSubType sets the EventSubType field's value.
+func (s *EventInformation) SetEventSubType(v string) *EventInformation {
+	s.EventSubType = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *EventInformation) SetInstanceId(v string) *EventInformation {
+	s.InstanceId = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes an instance export task.
+type ExportTask struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the resource being exported.
+	Description *string `locationName:"description" type:"string"`
+
+	// The ID of the export task.
+	ExportTaskId *string `locationName:"exportTaskId" type:"string"`
+
+	// Information about the export task.
+	ExportToS3Task *ExportToS3Task `locationName:"exportToS3" type:"structure"`
+
+	// Information about the instance to export.
+	InstanceExportDetails *InstanceExportDetails `locationName:"instanceExport" type:"structure"`
+
+	// The state of the export task.
+	State *string `locationName:"state" type:"string" enum:"ExportTaskState"`
+
+	// The status message related to the export task.
+	StatusMessage *string `locationName:"statusMessage" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportTask) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportTask) GoString() string {
+	return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *ExportTask) SetDescription(v string) *ExportTask {
+	s.Description = &v
+	return s
+}
+
+// SetExportTaskId sets the ExportTaskId field's value.
+func (s *ExportTask) SetExportTaskId(v string) *ExportTask {
+	s.ExportTaskId = &v
+	return s
+}
+
+// SetExportToS3Task sets the ExportToS3Task field's value.
+// Note: struct-typed setters store the pointer directly (no copy), unlike
+// the scalar setters which take a value and store its address.
+func (s *ExportTask) SetExportToS3Task(v *ExportToS3Task) *ExportTask {
+	s.ExportToS3Task = v
+	return s
+}
+
+// SetInstanceExportDetails sets the InstanceExportDetails field's value.
+func (s *ExportTask) SetInstanceExportDetails(v *InstanceExportDetails) *ExportTask {
+	s.InstanceExportDetails = v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *ExportTask) SetState(v string) *ExportTask {
+	s.State = &v
+	return s
+}
+
+// SetStatusMessage sets the StatusMessage field's value.
+func (s *ExportTask) SetStatusMessage(v string) *ExportTask {
+	s.StatusMessage = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes the format and location for an instance export task.
+type ExportToS3Task struct {
+	_ struct{} `type:"structure"`
+
+	// The container format used to combine disk images with metadata (such as OVF).
+	// If absent, only the disk image is exported.
+	ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"`
+
+	// The format for the exported image.
+	DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"`
+
+	// The S3 bucket for the destination image. The destination bucket must exist
+	// and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
+	S3Bucket *string `locationName:"s3Bucket" type:"string"`
+
+	// The encryption key for your S3 bucket.
+	// NOTE(review): the generated text above is upstream's; the field name
+	// suggests this is the S3 object key — confirm against the EC2 API reference.
+	S3Key *string `locationName:"s3Key" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportToS3Task) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportToS3Task) GoString() string {
+	return s.String()
+}
+
+// SetContainerFormat sets the ContainerFormat field's value.
+func (s *ExportToS3Task) SetContainerFormat(v string) *ExportToS3Task {
+	s.ContainerFormat = &v
+	return s
+}
+
+// SetDiskImageFormat sets the DiskImageFormat field's value.
+func (s *ExportToS3Task) SetDiskImageFormat(v string) *ExportToS3Task {
+	s.DiskImageFormat = &v
+	return s
+}
+
+// SetS3Bucket sets the S3Bucket field's value.
+func (s *ExportToS3Task) SetS3Bucket(v string) *ExportToS3Task {
+	s.S3Bucket = &v
+	return s
+}
+
+// SetS3Key sets the S3Key field's value.
+func (s *ExportToS3Task) SetS3Key(v string) *ExportToS3Task {
+	s.S3Key = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes an instance export task.
+type ExportToS3TaskSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The container format used to combine disk images with metadata (such as OVF).
+	// If absent, only the disk image is exported.
+	ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"`
+
+	// The format for the exported image.
+	DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"`
+
+	// The S3 bucket for the destination image. The destination bucket must exist
+	// and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.
+	S3Bucket *string `locationName:"s3Bucket" type:"string"`
+
+	// The image is written to a single object in the S3 bucket at the S3 key s3prefix
+	// + exportTaskId + '.' + diskImageFormat.
+	S3Prefix *string `locationName:"s3Prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s ExportToS3TaskSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExportToS3TaskSpecification) GoString() string {
+	return s.String()
+}
+
+// SetContainerFormat sets the ContainerFormat field's value.
+func (s *ExportToS3TaskSpecification) SetContainerFormat(v string) *ExportToS3TaskSpecification {
+	s.ContainerFormat = &v
+	return s
+}
+
+// SetDiskImageFormat sets the DiskImageFormat field's value.
+func (s *ExportToS3TaskSpecification) SetDiskImageFormat(v string) *ExportToS3TaskSpecification {
+	s.DiskImageFormat = &v
+	return s
+}
+
+// SetS3Bucket sets the S3Bucket field's value.
+func (s *ExportToS3TaskSpecification) SetS3Bucket(v string) *ExportToS3TaskSpecification {
+	s.S3Bucket = &v
+	return s
+}
+
+// SetS3Prefix sets the S3Prefix field's value.
+func (s *ExportToS3TaskSpecification) SetS3Prefix(v string) *ExportToS3TaskSpecification {
+	s.S3Prefix = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// A filter name and value pair that is used to return a more specific list
+// of results. Filters can be used to match a set of resources by various criteria,
+// such as tags, attributes, or IDs.
+type Filter struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the filter. Filter names are case-sensitive.
+	Name *string `type:"string"`
+
+	// One or more filter values. Filter values are case-sensitive.
+	Values []*string `locationName:"Value" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s Filter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Filter) GoString() string {
+	return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *Filter) SetName(v string) *Filter {
+	s.Name = &v
+	return s
+}
+
+// SetValues sets the Values field's value.
+// Note: the slice is stored as-is (not cloned); the caller retains aliasing.
+func (s *Filter) SetValues(v []*string) *Filter {
+	s.Values = v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Describes a flow log.
+type FlowLog struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time the flow log was created.
+	CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Information about the error that occurred. Rate limited indicates that CloudWatch
+	// logs throttling has been applied for one or more network interfaces, or that
+	// you've reached the limit on the number of CloudWatch Logs log groups that
+	// you can create. Access error indicates that the IAM role associated with
+	// the flow log does not have sufficient permissions to publish to CloudWatch
+	// Logs. Unknown error indicates an internal error.
+	DeliverLogsErrorMessage *string `locationName:"deliverLogsErrorMessage" type:"string"`
+
+	// The ARN of the IAM role that posts logs to CloudWatch Logs.
+	DeliverLogsPermissionArn *string `locationName:"deliverLogsPermissionArn" type:"string"`
+
+	// The status of the logs delivery (SUCCESS | FAILED).
+	DeliverLogsStatus *string `locationName:"deliverLogsStatus" type:"string"`
+
+	// The flow log ID.
+	FlowLogId *string `locationName:"flowLogId" type:"string"`
+
+	// The status of the flow log (ACTIVE).
+	FlowLogStatus *string `locationName:"flowLogStatus" type:"string"`
+
+	// The name of the flow log group.
+	LogGroupName *string `locationName:"logGroupName" type:"string"`
+
+	// The ID of the resource on which the flow log was created.
+	ResourceId *string `locationName:"resourceId" type:"string"`
+
+	// The type of traffic captured for the flow log.
+	TrafficType *string `locationName:"trafficType" type:"string" enum:"TrafficType"`
+}
+
+// String returns the string representation
+func (s FlowLog) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FlowLog) GoString() string {
+	return s.String()
+}
+
+// SetCreationTime sets the CreationTime field's value.
+func (s *FlowLog) SetCreationTime(v time.Time) *FlowLog {
+	s.CreationTime = &v
+	return s
+}
+
+// SetDeliverLogsErrorMessage sets the DeliverLogsErrorMessage field's value.
+func (s *FlowLog) SetDeliverLogsErrorMessage(v string) *FlowLog {
+	s.DeliverLogsErrorMessage = &v
+	return s
+}
+
+// SetDeliverLogsPermissionArn sets the DeliverLogsPermissionArn field's value.
+func (s *FlowLog) SetDeliverLogsPermissionArn(v string) *FlowLog {
+	s.DeliverLogsPermissionArn = &v
+	return s
+}
+
+// SetDeliverLogsStatus sets the DeliverLogsStatus field's value.
+func (s *FlowLog) SetDeliverLogsStatus(v string) *FlowLog {
+	s.DeliverLogsStatus = &v
+	return s
+}
+
+// SetFlowLogId sets the FlowLogId field's value.
+func (s *FlowLog) SetFlowLogId(v string) *FlowLog {
+	s.FlowLogId = &v
+	return s
+}
+
+// SetFlowLogStatus sets the FlowLogStatus field's value.
+func (s *FlowLog) SetFlowLogStatus(v string) *FlowLog {
+	s.FlowLogStatus = &v
+	return s
+}
+
+// SetLogGroupName sets the LogGroupName field's value.
+func (s *FlowLog) SetLogGroupName(v string) *FlowLog {
+	s.LogGroupName = &v
+	return s
+}
+
+// SetResourceId sets the ResourceId field's value.
+func (s *FlowLog) SetResourceId(v string) *FlowLog {
+	s.ResourceId = &v
+	return s
+}
+
+// SetTrafficType sets the TrafficType field's value.
+func (s *FlowLog) SetTrafficType(v string) *FlowLog {
+	s.TrafficType = &v
+	return s
+}
    +
+// NOTE(review): vendored, generator-produced aws-sdk-go code — do not hand-edit.
+// Contains the parameters for GetConsoleOutput.
+type GetConsoleOutputInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The ID of the instance.
+	//
+	// InstanceId is a required field
+	InstanceId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetConsoleOutputInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetConsoleOutputInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetConsoleOutputInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetConsoleOutputInput"}
+	if s.InstanceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetConsoleOutputInput) SetDryRun(v bool) *GetConsoleOutputInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *GetConsoleOutputInput) SetInstanceId(v string) *GetConsoleOutputInput {
+	s.InstanceId = &v
+	return s
+}
    +
    +// Contains the output of GetConsoleOutput.
    +type GetConsoleOutputOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The console output, Base64-encoded. If using a command line tool, the tool
    +	// decodes the output for you.
    +	Output *string `locationName:"output" type:"string"`
    +
    +	// The time the output was last updated.
    +	Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s GetConsoleOutputOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetConsoleOutputOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *GetConsoleOutputOutput) SetInstanceId(v string) *GetConsoleOutputOutput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetOutput sets the Output field's value.
    +func (s *GetConsoleOutputOutput) SetOutput(v string) *GetConsoleOutputOutput {
    +	s.Output = &v
    +	return s
    +}
    +
    +// SetTimestamp sets the Timestamp field's value.
    +func (s *GetConsoleOutputOutput) SetTimestamp(v time.Time) *GetConsoleOutputOutput {
    +	s.Timestamp = &v
    +	return s
    +}
    +
    +// Contains the parameters for the request.
    +type GetConsoleScreenshotInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `type:"string" required:"true"`
    +
    +	// When set to true, acts as keystroke input and wakes up an instance that's
    +	// in standby or "sleep" mode.
    +	WakeUp *bool `type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s GetConsoleScreenshotInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetConsoleScreenshotInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *GetConsoleScreenshotInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "GetConsoleScreenshotInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *GetConsoleScreenshotInput) SetDryRun(v bool) *GetConsoleScreenshotInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *GetConsoleScreenshotInput) SetInstanceId(v string) *GetConsoleScreenshotInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetWakeUp sets the WakeUp field's value.
    +func (s *GetConsoleScreenshotInput) SetWakeUp(v bool) *GetConsoleScreenshotInput {
    +	s.WakeUp = &v
    +	return s
    +}
    +
    +// Contains the output of the request.
    +type GetConsoleScreenshotOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The data that comprises the image.
    +	ImageData *string `locationName:"imageData" type:"string"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s GetConsoleScreenshotOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetConsoleScreenshotOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImageData sets the ImageData field's value.
    +func (s *GetConsoleScreenshotOutput) SetImageData(v string) *GetConsoleScreenshotOutput {
    +	s.ImageData = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *GetConsoleScreenshotOutput) SetInstanceId(v string) *GetConsoleScreenshotOutput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +type GetHostReservationPurchasePreviewInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID/s of the Dedicated Host/s that the reservation will be associated
    +	// with.
    +	//
    +	// HostIdSet is a required field
    +	HostIdSet []*string `locationNameList:"item" type:"list" required:"true"`
    +
    +	// The offering ID of the reservation.
    +	//
    +	// OfferingId is a required field
    +	OfferingId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s GetHostReservationPurchasePreviewInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetHostReservationPurchasePreviewInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *GetHostReservationPurchasePreviewInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "GetHostReservationPurchasePreviewInput"}
    +	if s.HostIdSet == nil {
    +		invalidParams.Add(request.NewErrParamRequired("HostIdSet"))
    +	}
    +	if s.OfferingId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("OfferingId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetHostIdSet sets the HostIdSet field's value.
    +func (s *GetHostReservationPurchasePreviewInput) SetHostIdSet(v []*string) *GetHostReservationPurchasePreviewInput {
    +	s.HostIdSet = v
    +	return s
    +}
    +
    +// SetOfferingId sets the OfferingId field's value.
    +func (s *GetHostReservationPurchasePreviewInput) SetOfferingId(v string) *GetHostReservationPurchasePreviewInput {
    +	s.OfferingId = &v
    +	return s
    +}
    +
    +type GetHostReservationPurchasePreviewOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The currency in which the totalUpfrontPrice and totalHourlyPrice amounts
    +	// are specified. At this time, the only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The purchase information of the Dedicated Host Reservation and the Dedicated
    +	// Hosts associated with it.
    +	Purchase []*Purchase `locationName:"purchase" type:"list"`
    +
    +	// The potential total hourly price of the reservation per hour.
    +	TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"`
    +
    +	// The potential total upfront price. This is billed immediately.
    +	TotalUpfrontPrice *string `locationName:"totalUpfrontPrice" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s GetHostReservationPurchasePreviewOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetHostReservationPurchasePreviewOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *GetHostReservationPurchasePreviewOutput) SetCurrencyCode(v string) *GetHostReservationPurchasePreviewOutput {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetPurchase sets the Purchase field's value.
    +func (s *GetHostReservationPurchasePreviewOutput) SetPurchase(v []*Purchase) *GetHostReservationPurchasePreviewOutput {
    +	s.Purchase = v
    +	return s
    +}
    +
    +// SetTotalHourlyPrice sets the TotalHourlyPrice field's value.
    +func (s *GetHostReservationPurchasePreviewOutput) SetTotalHourlyPrice(v string) *GetHostReservationPurchasePreviewOutput {
    +	s.TotalHourlyPrice = &v
    +	return s
    +}
    +
    +// SetTotalUpfrontPrice sets the TotalUpfrontPrice field's value.
    +func (s *GetHostReservationPurchasePreviewOutput) SetTotalUpfrontPrice(v string) *GetHostReservationPurchasePreviewOutput {
    +	s.TotalUpfrontPrice = &v
    +	return s
    +}
    +
    +// Contains the parameters for GetPasswordData.
    +type GetPasswordDataInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the Windows instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s GetPasswordDataInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetPasswordDataInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *GetPasswordDataInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "GetPasswordDataInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *GetPasswordDataInput) SetDryRun(v bool) *GetPasswordDataInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *GetPasswordDataInput) SetInstanceId(v string) *GetPasswordDataInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// Contains the output of GetPasswordData.
    +type GetPasswordDataOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the Windows instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The password of the instance.
    +	PasswordData *string `locationName:"passwordData" type:"string"`
    +
    +	// The time the data was last updated.
    +	Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s GetPasswordDataOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetPasswordDataOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *GetPasswordDataOutput) SetInstanceId(v string) *GetPasswordDataOutput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetPasswordData sets the PasswordData field's value.
    +func (s *GetPasswordDataOutput) SetPasswordData(v string) *GetPasswordDataOutput {
    +	s.PasswordData = &v
    +	return s
    +}
    +
    +// SetTimestamp sets the Timestamp field's value.
    +func (s *GetPasswordDataOutput) SetTimestamp(v time.Time) *GetPasswordDataOutput {
    +	s.Timestamp = &v
    +	return s
    +}
    +
    +// Contains the parameters for GetReservedInstanceExchangeQuote.
    +type GetReservedInstancesExchangeQuoteInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// The ID/s of the Convertible Reserved Instances you want to exchange.
    +	//
    +	// ReservedInstanceIds is a required field
    +	ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"`
    +
    +	// The configuration requirements of the Convertible Reserved Instances you
    +	// want in exchange for your current Convertible Reserved Instances.
    +	TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s GetReservedInstancesExchangeQuoteInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetReservedInstancesExchangeQuoteInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *GetReservedInstancesExchangeQuoteInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "GetReservedInstancesExchangeQuoteInput"}
    +	if s.ReservedInstanceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ReservedInstanceIds"))
    +	}
    +	if s.TargetConfigurations != nil {
    +		for i, v := range s.TargetConfigurations {
    +			if v == nil {
    +				continue
    +			}
    +			if err := v.Validate(); err != nil {
    +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetConfigurations", i), err.(request.ErrInvalidParams))
    +			}
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *GetReservedInstancesExchangeQuoteInput) SetDryRun(v bool) *GetReservedInstancesExchangeQuoteInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetReservedInstanceIds sets the ReservedInstanceIds field's value.
    +func (s *GetReservedInstancesExchangeQuoteInput) SetReservedInstanceIds(v []*string) *GetReservedInstancesExchangeQuoteInput {
    +	s.ReservedInstanceIds = v
    +	return s
    +}
    +
    +// SetTargetConfigurations sets the TargetConfigurations field's value.
    +func (s *GetReservedInstancesExchangeQuoteInput) SetTargetConfigurations(v []*TargetConfigurationRequest) *GetReservedInstancesExchangeQuoteInput {
    +	s.TargetConfigurations = v
    +	return s
    +}
    +
    +// Contains the output of GetReservedInstancesExchangeQuote.
    +type GetReservedInstancesExchangeQuoteOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The currency of the transaction.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string"`
    +
    +	// If true, the exchange is valid. If false, the exchange cannot be performed.
    +	IsValidExchange *bool `locationName:"isValidExchange" type:"boolean"`
    +
    +	// The new end date of the reservation term.
    +	OutputReservedInstancesWillExpireAt *time.Time `locationName:"outputReservedInstancesWillExpireAt" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The total true upfront charge for the exchange.
    +	PaymentDue *string `locationName:"paymentDue" type:"string"`
    +
    +	// The cost associated with the Reserved Instance.
    +	ReservedInstanceValueRollup *ReservationValue `locationName:"reservedInstanceValueRollup" type:"structure"`
    +
    +	// The configuration of your Convertible Reserved Instances.
    +	ReservedInstanceValueSet []*ReservedInstanceReservationValue `locationName:"reservedInstanceValueSet" locationNameList:"item" type:"list"`
    +
    +	// The cost associated with the Reserved Instance.
    +	TargetConfigurationValueRollup *ReservationValue `locationName:"targetConfigurationValueRollup" type:"structure"`
    +
    +	// The values of the target Convertible Reserved Instances.
    +	TargetConfigurationValueSet []*TargetReservationValue `locationName:"targetConfigurationValueSet" locationNameList:"item" type:"list"`
    +
    +	// Describes the reason why the exchange can not be completed.
    +	ValidationFailureReason *string `locationName:"validationFailureReason" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s GetReservedInstancesExchangeQuoteOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetReservedInstancesExchangeQuoteOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetCurrencyCode(v string) *GetReservedInstancesExchangeQuoteOutput {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetIsValidExchange sets the IsValidExchange field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetIsValidExchange(v bool) *GetReservedInstancesExchangeQuoteOutput {
    +	s.IsValidExchange = &v
    +	return s
    +}
    +
    +// SetOutputReservedInstancesWillExpireAt sets the OutputReservedInstancesWillExpireAt field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetOutputReservedInstancesWillExpireAt(v time.Time) *GetReservedInstancesExchangeQuoteOutput {
    +	s.OutputReservedInstancesWillExpireAt = &v
    +	return s
    +}
    +
    +// SetPaymentDue sets the PaymentDue field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetPaymentDue(v string) *GetReservedInstancesExchangeQuoteOutput {
    +	s.PaymentDue = &v
    +	return s
    +}
    +
    +// SetReservedInstanceValueRollup sets the ReservedInstanceValueRollup field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetReservedInstanceValueRollup(v *ReservationValue) *GetReservedInstancesExchangeQuoteOutput {
    +	s.ReservedInstanceValueRollup = v
    +	return s
    +}
    +
    +// SetReservedInstanceValueSet sets the ReservedInstanceValueSet field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetReservedInstanceValueSet(v []*ReservedInstanceReservationValue) *GetReservedInstancesExchangeQuoteOutput {
    +	s.ReservedInstanceValueSet = v
    +	return s
    +}
    +
    +// SetTargetConfigurationValueRollup sets the TargetConfigurationValueRollup field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetTargetConfigurationValueRollup(v *ReservationValue) *GetReservedInstancesExchangeQuoteOutput {
    +	s.TargetConfigurationValueRollup = v
    +	return s
    +}
    +
    +// SetTargetConfigurationValueSet sets the TargetConfigurationValueSet field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetTargetConfigurationValueSet(v []*TargetReservationValue) *GetReservedInstancesExchangeQuoteOutput {
    +	s.TargetConfigurationValueSet = v
    +	return s
    +}
    +
    +// SetValidationFailureReason sets the ValidationFailureReason field's value.
    +func (s *GetReservedInstancesExchangeQuoteOutput) SetValidationFailureReason(v string) *GetReservedInstancesExchangeQuoteOutput {
    +	s.ValidationFailureReason = &v
    +	return s
    +}
    +
    +// Describes a security group.
    +type GroupIdentifier struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the security group.
    +	GroupId *string `locationName:"groupId" type:"string"`
    +
    +	// The name of the security group.
    +	GroupName *string `locationName:"groupName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s GroupIdentifier) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GroupIdentifier) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *GroupIdentifier) SetGroupId(v string) *GroupIdentifier {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// Describes an event in the history of the Spot fleet request.
    +type HistoryRecord struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the event.
    +	//
    +	// EventInformation is a required field
    +	EventInformation *EventInformation `locationName:"eventInformation" type:"structure" required:"true"`
    +
    +	// The event type.
    +	//
    +	//    * error - Indicates an error with the Spot fleet request.
    +	//
    +	//    * fleetRequestChange - Indicates a change in the status or configuration
    +	//    of the Spot fleet request.
    +	//
    +	//    * instanceChange - Indicates that an instance was launched or terminated.
    +	//
    +	// EventType is a required field
    +	EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"`
    +
    +	// The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	//
    +	// Timestamp is a required field
    +	Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s HistoryRecord) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s HistoryRecord) GoString() string {
    +	return s.String()
    +}
    +
    +// SetEventInformation sets the EventInformation field's value.
    +func (s *HistoryRecord) SetEventInformation(v *EventInformation) *HistoryRecord {
    +	s.EventInformation = v
    +	return s
    +}
    +
    +// SetEventType sets the EventType field's value.
    +func (s *HistoryRecord) SetEventType(v string) *HistoryRecord {
    +	s.EventType = &v
    +	return s
    +}
    +
    +// SetTimestamp sets the Timestamp field's value.
    +func (s *HistoryRecord) SetTimestamp(v time.Time) *HistoryRecord {
    +	s.Timestamp = &v
    +	return s
    +}
    +
    +// Describes the properties of the Dedicated Host.
    +type Host struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Whether auto-placement is on or off.
    +	AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"`
    +
    +	// The Availability Zone of the Dedicated Host.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The number of new instances that can be launched onto the Dedicated Host.
    +	AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure idempotency of the
    +	// request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// The ID of the Dedicated Host.
    +	HostId *string `locationName:"hostId" type:"string"`
    +
    +	// The hardware specifications of the Dedicated Host.
    +	HostProperties *HostProperties `locationName:"hostProperties" type:"structure"`
    +
    +	// The reservation ID of the Dedicated Host. This returns a null response if
    +	// the Dedicated Host doesn't have an associated reservation.
    +	HostReservationId *string `locationName:"hostReservationId" type:"string"`
    +
    +	// The IDs and instance type that are currently running on the Dedicated Host.
    +	Instances []*HostInstance `locationName:"instances" locationNameList:"item" type:"list"`
    +
    +	// The Dedicated Host's state.
    +	State *string `locationName:"state" type:"string" enum:"AllocationState"`
    +}
    +
    +// String returns the string representation
    +func (s Host) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Host) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAutoPlacement sets the AutoPlacement field's value.
    +func (s *Host) SetAutoPlacement(v string) *Host {
    +	s.AutoPlacement = &v
    +	return s
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *Host) SetAvailabilityZone(v string) *Host {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetAvailableCapacity sets the AvailableCapacity field's value.
    +func (s *Host) SetAvailableCapacity(v *AvailableCapacity) *Host {
    +	s.AvailableCapacity = v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *Host) SetClientToken(v string) *Host {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetHostId sets the HostId field's value.
    +func (s *Host) SetHostId(v string) *Host {
    +	s.HostId = &v
    +	return s
    +}
    +
    +// SetHostProperties sets the HostProperties field's value.
    +func (s *Host) SetHostProperties(v *HostProperties) *Host {
    +	s.HostProperties = v
    +	return s
    +}
    +
    +// SetHostReservationId sets the HostReservationId field's value.
    +func (s *Host) SetHostReservationId(v string) *Host {
    +	s.HostReservationId = &v
    +	return s
    +}
    +
    +// SetInstances sets the Instances field's value.
    +func (s *Host) SetInstances(v []*HostInstance) *Host {
    +	s.Instances = v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Host) SetState(v string) *Host {
    +	s.State = &v
    +	return s
    +}
    +
    +// Describes an instance running on a Dedicated Host.
    +type HostInstance struct {
    +	_ struct{} `type:"structure"`
    +
    +	// the IDs of instances that are running on the Dedicated Host.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The instance type size (for example, m3.medium) of the running instance.
    +	InstanceType *string `locationName:"instanceType" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s HostInstance) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s HostInstance) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *HostInstance) SetInstanceId(v string) *HostInstance {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *HostInstance) SetInstanceType(v string) *HostInstance {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// Details about the Dedicated Host Reservation offering.
    +type HostOffering struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The currency of the offering.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The duration of the offering (in seconds).
    +	Duration *int64 `locationName:"duration" type:"integer"`
    +
    +	// The hourly price of the offering.
    +	HourlyPrice *string `locationName:"hourlyPrice" type:"string"`
    +
    +	// The instance family of the offering.
    +	InstanceFamily *string `locationName:"instanceFamily" type:"string"`
    +
    +	// The ID of the offering.
    +	OfferingId *string `locationName:"offeringId" type:"string"`
    +
    +	// The available payment option.
    +	PaymentOption *string `locationName:"paymentOption" type:"string" enum:"PaymentOption"`
    +
    +	// The upfront price of the offering. Does not apply to No Upfront offerings.
    +	UpfrontPrice *string `locationName:"upfrontPrice" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s HostOffering) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s HostOffering) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *HostOffering) SetCurrencyCode(v string) *HostOffering {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetDuration sets the Duration field's value.
    +func (s *HostOffering) SetDuration(v int64) *HostOffering {
    +	s.Duration = &v
    +	return s
    +}
    +
    +// SetHourlyPrice sets the HourlyPrice field's value.
    +func (s *HostOffering) SetHourlyPrice(v string) *HostOffering {
    +	s.HourlyPrice = &v
    +	return s
    +}
    +
    +// SetInstanceFamily sets the InstanceFamily field's value.
    +func (s *HostOffering) SetInstanceFamily(v string) *HostOffering {
    +	s.InstanceFamily = &v
    +	return s
    +}
    +
    +// SetOfferingId sets the OfferingId field's value.
    +func (s *HostOffering) SetOfferingId(v string) *HostOffering {
    +	s.OfferingId = &v
    +	return s
    +}
    +
    +// SetPaymentOption sets the PaymentOption field's value.
    +func (s *HostOffering) SetPaymentOption(v string) *HostOffering {
    +	s.PaymentOption = &v
    +	return s
    +}
    +
    +// SetUpfrontPrice sets the UpfrontPrice field's value.
    +func (s *HostOffering) SetUpfrontPrice(v string) *HostOffering {
    +	s.UpfrontPrice = &v
    +	return s
    +}
    +
    +// Describes properties of a Dedicated Host.
    +type HostProperties struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of cores on the Dedicated Host.
    +	Cores *int64 `locationName:"cores" type:"integer"`
    +
    +	// The instance type size that the Dedicated Host supports (for example, m3.medium).
    +	InstanceType *string `locationName:"instanceType" type:"string"`
    +
    +	// The number of sockets on the Dedicated Host.
    +	Sockets *int64 `locationName:"sockets" type:"integer"`
    +
    +	// The number of vCPUs on the Dedicated Host.
    +	TotalVCpus *int64 `locationName:"totalVCpus" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s HostProperties) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s HostProperties) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCores sets the Cores field's value.
    +func (s *HostProperties) SetCores(v int64) *HostProperties {
    +	s.Cores = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *HostProperties) SetInstanceType(v string) *HostProperties {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetSockets sets the Sockets field's value.
    +func (s *HostProperties) SetSockets(v int64) *HostProperties {
    +	s.Sockets = &v
    +	return s
    +}
    +
    +// SetTotalVCpus sets the TotalVCpus field's value.
    +func (s *HostProperties) SetTotalVCpus(v int64) *HostProperties {
    +	s.TotalVCpus = &v
    +	return s
    +}
    +
    +// Details about the Dedicated Host Reservation and associated Dedicated Hosts.
    +type HostReservation struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of Dedicated Hosts the reservation is associated with.
    +	Count *int64 `locationName:"count" type:"integer"`
    +
    +	// The currency in which the upfrontPrice and hourlyPrice amounts are specified.
    +	// At this time, the only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The length of the reservation's term, specified in seconds. Can be 31536000
    +	// (1 year) | 94608000 (3 years).
    +	Duration *int64 `locationName:"duration" type:"integer"`
    +
    +	// The date and time that the reservation ends.
    +	End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The IDs of the Dedicated Hosts associated with the reservation.
    +	HostIdSet []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the reservation that specifies the associated Dedicated Hosts.
    +	HostReservationId *string `locationName:"hostReservationId" type:"string"`
    +
    +	// The hourly price of the reservation.
    +	HourlyPrice *string `locationName:"hourlyPrice" type:"string"`
    +
    +	// The instance family of the Dedicated Host Reservation. The instance family
    +	// on the Dedicated Host must be the same in order for it to benefit from the
    +	// reservation.
    +	InstanceFamily *string `locationName:"instanceFamily" type:"string"`
    +
    +	// The ID of the reservation. This remains the same regardless of which Dedicated
    +	// Hosts are associated with it.
    +	OfferingId *string `locationName:"offeringId" type:"string"`
    +
    +	// The payment option selected for this reservation.
    +	PaymentOption *string `locationName:"paymentOption" type:"string" enum:"PaymentOption"`
    +
    +	// The date and time that the reservation started.
    +	Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The state of the reservation.
    +	State *string `locationName:"state" type:"string" enum:"ReservationState"`
    +
    +	// The upfront price of the reservation.
    +	UpfrontPrice *string `locationName:"upfrontPrice" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s HostReservation) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s HostReservation) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCount sets the Count field's value.
    +func (s *HostReservation) SetCount(v int64) *HostReservation {
    +	s.Count = &v
    +	return s
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *HostReservation) SetCurrencyCode(v string) *HostReservation {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetDuration sets the Duration field's value.
    +func (s *HostReservation) SetDuration(v int64) *HostReservation {
    +	s.Duration = &v
    +	return s
    +}
    +
    +// SetEnd sets the End field's value.
    +func (s *HostReservation) SetEnd(v time.Time) *HostReservation {
    +	s.End = &v
    +	return s
    +}
    +
    +// SetHostIdSet sets the HostIdSet field's value.
    +func (s *HostReservation) SetHostIdSet(v []*string) *HostReservation {
    +	s.HostIdSet = v
    +	return s
    +}
    +
    +// SetHostReservationId sets the HostReservationId field's value.
    +func (s *HostReservation) SetHostReservationId(v string) *HostReservation {
    +	s.HostReservationId = &v
    +	return s
    +}
    +
    +// SetHourlyPrice sets the HourlyPrice field's value.
    +func (s *HostReservation) SetHourlyPrice(v string) *HostReservation {
    +	s.HourlyPrice = &v
    +	return s
    +}
    +
    +// SetInstanceFamily sets the InstanceFamily field's value.
    +func (s *HostReservation) SetInstanceFamily(v string) *HostReservation {
    +	s.InstanceFamily = &v
    +	return s
    +}
    +
    +// SetOfferingId sets the OfferingId field's value.
    +func (s *HostReservation) SetOfferingId(v string) *HostReservation {
    +	s.OfferingId = &v
    +	return s
    +}
    +
    +// SetPaymentOption sets the PaymentOption field's value.
    +func (s *HostReservation) SetPaymentOption(v string) *HostReservation {
    +	s.PaymentOption = &v
    +	return s
    +}
    +
    +// SetStart sets the Start field's value.
    +func (s *HostReservation) SetStart(v time.Time) *HostReservation {
    +	s.Start = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *HostReservation) SetState(v string) *HostReservation {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetUpfrontPrice sets the UpfrontPrice field's value.
    +func (s *HostReservation) SetUpfrontPrice(v string) *HostReservation {
    +	s.UpfrontPrice = &v
    +	return s
    +}
    +
    +// Describes an IAM instance profile.
    +type IamInstanceProfile struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Amazon Resource Name (ARN) of the instance profile.
    +	Arn *string `locationName:"arn" type:"string"`
    +
    +	// The ID of the instance profile.
    +	Id *string `locationName:"id" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s IamInstanceProfile) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s IamInstanceProfile) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArn sets the Arn field's value.
    +func (s *IamInstanceProfile) SetArn(v string) *IamInstanceProfile {
    +	s.Arn = &v
    +	return s
    +}
    +
    +// SetId sets the Id field's value.
    +func (s *IamInstanceProfile) SetId(v string) *IamInstanceProfile {
    +	s.Id = &v
    +	return s
    +}
    +
    +// Describes an IAM instance profile.
    +type IamInstanceProfileSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Amazon Resource Name (ARN) of the instance profile.
    +	Arn *string `locationName:"arn" type:"string"`
    +
    +	// The name of the instance profile.
    +	Name *string `locationName:"name" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s IamInstanceProfileSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s IamInstanceProfileSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArn sets the Arn field's value.
    +func (s *IamInstanceProfileSpecification) SetArn(v string) *IamInstanceProfileSpecification {
    +	s.Arn = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *IamInstanceProfileSpecification) SetName(v string) *IamInstanceProfileSpecification {
    +	s.Name = &v
    +	return s
    +}
    +
    +// Describes the ICMP type and code.
    +type IcmpTypeCode struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ICMP code. A value of -1 means all codes for the specified ICMP type.
    +	Code *int64 `locationName:"code" type:"integer"`
    +
    +	// The ICMP type. A value of -1 means all types.
    +	Type *int64 `locationName:"type" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s IcmpTypeCode) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s IcmpTypeCode) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *IcmpTypeCode) SetCode(v int64) *IcmpTypeCode {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetType sets the Type field's value.
    +func (s *IcmpTypeCode) SetType(v int64) *IcmpTypeCode {
    +	s.Type = &v
    +	return s
    +}
    +
    +// Describes the ID format for a resource.
    +type IdFormat struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The date in UTC at which you are permanently switched over to using longer
    +	// IDs. If a deadline is not yet available for this resource type, this field
    +	// is not returned.
    +	Deadline *time.Time `locationName:"deadline" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The type of resource.
    +	Resource *string `locationName:"resource" type:"string"`
    +
    +	// Indicates whether longer IDs (17-character IDs) are enabled for the resource.
    +	UseLongIds *bool `locationName:"useLongIds" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s IdFormat) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s IdFormat) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDeadline sets the Deadline field's value.
    +func (s *IdFormat) SetDeadline(v time.Time) *IdFormat {
    +	s.Deadline = &v
    +	return s
    +}
    +
    +// SetResource sets the Resource field's value.
    +func (s *IdFormat) SetResource(v string) *IdFormat {
    +	s.Resource = &v
    +	return s
    +}
    +
    +// SetUseLongIds sets the UseLongIds field's value.
    +func (s *IdFormat) SetUseLongIds(v bool) *IdFormat {
    +	s.UseLongIds = &v
    +	return s
    +}
    +
    +// Describes an image.
    +type Image struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The architecture of the image.
    +	Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
    +
    +	// Any block device mapping entries.
    +	BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
    +
    +	// The date and time the image was created.
    +	CreationDate *string `locationName:"creationDate" type:"string"`
    +
    +	// The description of the AMI that was provided during image creation.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// Specifies whether enhanced networking with ENA is enabled.
    +	EnaSupport *bool `locationName:"enaSupport" type:"boolean"`
    +
    +	// The hypervisor type of the image.
    +	Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"`
    +
    +	// The ID of the AMI.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +
    +	// The location of the AMI.
    +	ImageLocation *string `locationName:"imageLocation" type:"string"`
    +
    +	// The AWS account alias (for example, amazon, self) or the AWS account ID of
    +	// the AMI owner.
    +	ImageOwnerAlias *string `locationName:"imageOwnerAlias" type:"string"`
    +
    +	// The type of image.
    +	ImageType *string `locationName:"imageType" type:"string" enum:"ImageTypeValues"`
    +
    +	// The kernel associated with the image, if any. Only applicable for machine
    +	// images.
    +	KernelId *string `locationName:"kernelId" type:"string"`
    +
    +	// The name of the AMI that was provided during image creation.
    +	Name *string `locationName:"name" type:"string"`
    +
    +	// The AWS account ID of the image owner.
    +	OwnerId *string `locationName:"imageOwnerId" type:"string"`
    +
    +	// The value is Windows for Windows AMIs; otherwise blank.
    +	Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"`
    +
    +	// Any product codes associated with the AMI.
    +	ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
    +
    +	// Indicates whether the image has public launch permissions. The value is true
    +	// if this image has public launch permissions or false if it has only implicit
    +	// and explicit launch permissions.
    +	Public *bool `locationName:"isPublic" type:"boolean"`
    +
    +	// The RAM disk associated with the image, if any. Only applicable for machine
    +	// images.
    +	RamdiskId *string `locationName:"ramdiskId" type:"string"`
    +
    +	// The device name of the root device (for example, /dev/sda1 or /dev/xvda).
    +	RootDeviceName *string `locationName:"rootDeviceName" type:"string"`
    +
    +	// The type of root device used by the AMI. The AMI can use an EBS volume or
    +	// an instance store volume.
    +	RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"`
    +
    +	// Specifies whether enhanced networking with the Intel 82599 Virtual Function
    +	// interface is enabled.
    +	SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"`
    +
    +	// The current state of the AMI. If the state is available, the image is successfully
    +	// registered and can be used to launch an instance.
    +	State *string `locationName:"imageState" type:"string" enum:"ImageState"`
    +
    +	// The reason for the state change.
    +	StateReason *StateReason `locationName:"stateReason" type:"structure"`
    +
    +	// Any tags assigned to the image.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The type of virtualization of the AMI.
    +	VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"`
    +}
    +
    +// String returns the string representation
    +func (s Image) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Image) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArchitecture sets the Architecture field's value.
    +func (s *Image) SetArchitecture(v string) *Image {
    +	s.Architecture = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *Image) SetBlockDeviceMappings(v []*BlockDeviceMapping) *Image {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetCreationDate sets the CreationDate field's value.
    +func (s *Image) SetCreationDate(v string) *Image {
    +	s.CreationDate = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *Image) SetDescription(v string) *Image {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetEnaSupport sets the EnaSupport field's value.
    +func (s *Image) SetEnaSupport(v bool) *Image {
    +	s.EnaSupport = &v
    +	return s
    +}
    +
    +// SetHypervisor sets the Hypervisor field's value.
    +func (s *Image) SetHypervisor(v string) *Image {
    +	s.Hypervisor = &v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *Image) SetImageId(v string) *Image {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetImageLocation sets the ImageLocation field's value.
    +func (s *Image) SetImageLocation(v string) *Image {
    +	s.ImageLocation = &v
    +	return s
    +}
    +
    +// SetImageOwnerAlias sets the ImageOwnerAlias field's value.
    +func (s *Image) SetImageOwnerAlias(v string) *Image {
    +	s.ImageOwnerAlias = &v
    +	return s
    +}
    +
    +// SetImageType sets the ImageType field's value.
    +func (s *Image) SetImageType(v string) *Image {
    +	s.ImageType = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *Image) SetKernelId(v string) *Image {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *Image) SetName(v string) *Image {
    +	s.Name = &v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *Image) SetOwnerId(v string) *Image {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetPlatform sets the Platform field's value.
    +func (s *Image) SetPlatform(v string) *Image {
    +	s.Platform = &v
    +	return s
    +}
    +
    +// SetProductCodes sets the ProductCodes field's value.
    +func (s *Image) SetProductCodes(v []*ProductCode) *Image {
    +	s.ProductCodes = v
    +	return s
    +}
    +
    +// SetPublic sets the Public field's value.
    +func (s *Image) SetPublic(v bool) *Image {
    +	s.Public = &v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *Image) SetRamdiskId(v string) *Image {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetRootDeviceName sets the RootDeviceName field's value.
    +func (s *Image) SetRootDeviceName(v string) *Image {
    +	s.RootDeviceName = &v
    +	return s
    +}
    +
    +// SetRootDeviceType sets the RootDeviceType field's value.
    +func (s *Image) SetRootDeviceType(v string) *Image {
    +	s.RootDeviceType = &v
    +	return s
    +}
    +
    +// SetSriovNetSupport sets the SriovNetSupport field's value.
    +func (s *Image) SetSriovNetSupport(v string) *Image {
    +	s.SriovNetSupport = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Image) SetState(v string) *Image {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetStateReason sets the StateReason field's value.
    +func (s *Image) SetStateReason(v *StateReason) *Image {
    +	s.StateReason = v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *Image) SetTags(v []*Tag) *Image {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVirtualizationType sets the VirtualizationType field's value.
    +func (s *Image) SetVirtualizationType(v string) *Image {
    +	s.VirtualizationType = &v
    +	return s
    +}
    +
    +// Describes the disk container object for an import image task.
    +type ImageDiskContainer struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The description of the disk image.
    +	Description *string `type:"string"`
    +
    +	// The block device mapping for the disk.
    +	DeviceName *string `type:"string"`
    +
    +	// The format of the disk image being imported.
    +	//
    +	// Valid values: RAW | VHD | VMDK | OVA
    +	Format *string `type:"string"`
    +
    +	// The ID of the EBS snapshot to be used for importing the snapshot.
    +	SnapshotId *string `type:"string"`
    +
    +	// The URL to the Amazon S3-based disk image being imported. The URL can either
    +	// be a https URL (https://..) or an Amazon S3 URL (s3://..)
    +	Url *string `type:"string"`
    +
    +	// The S3 bucket for the disk image.
    +	UserBucket *UserBucket `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ImageDiskContainer) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ImageDiskContainer) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *ImageDiskContainer) SetDescription(v string) *ImageDiskContainer {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDeviceName sets the DeviceName field's value.
    +func (s *ImageDiskContainer) SetDeviceName(v string) *ImageDiskContainer {
    +	s.DeviceName = &v
    +	return s
    +}
    +
    +// SetFormat sets the Format field's value.
    +func (s *ImageDiskContainer) SetFormat(v string) *ImageDiskContainer {
    +	s.Format = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *ImageDiskContainer) SetSnapshotId(v string) *ImageDiskContainer {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// SetUrl sets the Url field's value.
    +func (s *ImageDiskContainer) SetUrl(v string) *ImageDiskContainer {
    +	s.Url = &v
    +	return s
    +}
    +
    +// SetUserBucket sets the UserBucket field's value.
    +func (s *ImageDiskContainer) SetUserBucket(v *UserBucket) *ImageDiskContainer {
    +	s.UserBucket = v
    +	return s
    +}
    +
    +// Contains the parameters for ImportImage.
    +type ImportImageInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The architecture of the virtual machine.
    +	//
    +	// Valid values: i386 | x86_64
    +	Architecture *string `type:"string"`
    +
    +	// The client-specific data.
    +	ClientData *ClientData `type:"structure"`
    +
    +	// The token to enable idempotency for VM import requests.
    +	ClientToken *string `type:"string"`
    +
    +	// A description string for the import image task.
    +	Description *string `type:"string"`
    +
    +	// Information about the disk containers.
    +	DiskContainers []*ImageDiskContainer `locationName:"DiskContainer" locationNameList:"item" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// The target hypervisor platform.
    +	//
    +	// Valid values: xen
    +	Hypervisor *string `type:"string"`
    +
    +	// The license type to be used for the Amazon Machine Image (AMI) after importing.
    +	//
    +	// Note: You may only use BYOL if you have existing licenses with rights to
    +	// use these licenses in a third party cloud like AWS. For more information,
    +	// see Prerequisites (http://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image)
    +	// in the VM Import/Export User Guide.
    +	//
    +	// Valid values: AWS | BYOL
    +	LicenseType *string `type:"string"`
    +
    +	// The operating system of the virtual machine.
    +	//
    +	// Valid values: Windows | Linux
    +	Platform *string `type:"string"`
    +
    +	// The name of the role to use when not using the default role, 'vmimport'.
    +	RoleName *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ImportImageInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ImportImageInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArchitecture sets the Architecture field's value.
    +func (s *ImportImageInput) SetArchitecture(v string) *ImportImageInput {
    +	s.Architecture = &v
    +	return s
    +}
    +
    +// SetClientData sets the ClientData field's value.
    +func (s *ImportImageInput) SetClientData(v *ClientData) *ImportImageInput {
    +	s.ClientData = v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *ImportImageInput) SetClientToken(v string) *ImportImageInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *ImportImageInput) SetDescription(v string) *ImportImageInput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDiskContainers sets the DiskContainers field's value.
    +func (s *ImportImageInput) SetDiskContainers(v []*ImageDiskContainer) *ImportImageInput {
    +	s.DiskContainers = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ImportImageInput) SetDryRun(v bool) *ImportImageInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetHypervisor sets the Hypervisor field's value.
    +func (s *ImportImageInput) SetHypervisor(v string) *ImportImageInput {
    +	s.Hypervisor = &v
    +	return s
    +}
    +
    +// SetLicenseType sets the LicenseType field's value.
    +func (s *ImportImageInput) SetLicenseType(v string) *ImportImageInput {
    +	s.LicenseType = &v
    +	return s
    +}
    +
    +// SetPlatform sets the Platform field's value.
    +func (s *ImportImageInput) SetPlatform(v string) *ImportImageInput {
    +	s.Platform = &v
    +	return s
    +}
    +
    +// SetRoleName sets the RoleName field's value.
    +func (s *ImportImageInput) SetRoleName(v string) *ImportImageInput {
    +	s.RoleName = &v
    +	return s
    +}
    +
    +// Contains the output for ImportImage.
    +type ImportImageOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The architecture of the virtual machine.
    +	Architecture *string `locationName:"architecture" type:"string"`
    +
    +	// A description of the import task.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The target hypervisor of the import task.
    +	Hypervisor *string `locationName:"hypervisor" type:"string"`
    +
    +	// The ID of the Amazon Machine Image (AMI) created by the import task.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +
    +	// The task ID of the import image task.
    +	ImportTaskId *string `locationName:"importTaskId" type:"string"`
    +
    +	// The license type of the virtual machine.
    +	LicenseType *string `locationName:"licenseType" type:"string"`
    +
    +	// The operating system of the virtual machine.
    +	Platform *string `locationName:"platform" type:"string"`
    +
    +	// The progress of the task.
    +	Progress *string `locationName:"progress" type:"string"`
    +
    +	// Information about the snapshots.
    +	SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`
    +
    +	// A brief status of the task.
    +	Status *string `locationName:"status" type:"string"`
    +
    +	// A detailed status message of the import task.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ImportImageOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ImportImageOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArchitecture sets the Architecture field's value.
    +func (s *ImportImageOutput) SetArchitecture(v string) *ImportImageOutput {
    +	s.Architecture = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *ImportImageOutput) SetDescription(v string) *ImportImageOutput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetHypervisor sets the Hypervisor field's value.
    +func (s *ImportImageOutput) SetHypervisor(v string) *ImportImageOutput {
    +	s.Hypervisor = &v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *ImportImageOutput) SetImageId(v string) *ImportImageOutput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetImportTaskId sets the ImportTaskId field's value.
    +func (s *ImportImageOutput) SetImportTaskId(v string) *ImportImageOutput {
    +	s.ImportTaskId = &v
    +	return s
    +}
    +
    +// SetLicenseType sets the LicenseType field's value.
    +func (s *ImportImageOutput) SetLicenseType(v string) *ImportImageOutput {
    +	s.LicenseType = &v
    +	return s
    +}
    +
    +// SetPlatform sets the Platform field's value.
    +func (s *ImportImageOutput) SetPlatform(v string) *ImportImageOutput {
    +	s.Platform = &v
    +	return s
    +}
    +
    +// SetProgress sets the Progress field's value.
    +func (s *ImportImageOutput) SetProgress(v string) *ImportImageOutput {
    +	s.Progress = &v
    +	return s
    +}
    +
    +// SetSnapshotDetails sets the SnapshotDetails field's value.
    +func (s *ImportImageOutput) SetSnapshotDetails(v []*SnapshotDetail) *ImportImageOutput {
    +	s.SnapshotDetails = v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *ImportImageOutput) SetStatus(v string) *ImportImageOutput {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetStatusMessage sets the StatusMessage field's value.
    +func (s *ImportImageOutput) SetStatusMessage(v string) *ImportImageOutput {
    +	s.StatusMessage = &v
    +	return s
    +}
    +
    +// Describes an import image task.
    +type ImportImageTask struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The architecture of the virtual machine.
    +	//
    +	// Valid values: i386 | x86_64
    +	Architecture *string `locationName:"architecture" type:"string"`
    +
    +	// A description of the import task.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The target hypervisor for the import task.
    +	//
    +	// Valid values: xen
    +	Hypervisor *string `locationName:"hypervisor" type:"string"`
    +
    +	// The ID of the Amazon Machine Image (AMI) of the imported virtual machine.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +
    +	// The ID of the import image task.
    +	ImportTaskId *string `locationName:"importTaskId" type:"string"`
    +
    +	// The license type of the virtual machine.
    +	LicenseType *string `locationName:"licenseType" type:"string"`
    +
    +	// The operating system of the virtual machine.
    +	Platform *string `locationName:"platform" type:"string"`
    +
    +	// The percentage of progress of the import image task.
    +	Progress *string `locationName:"progress" type:"string"`
    +
    +	// Information about the snapshots.
    +	SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`
    +
    +	// A brief status for the import image task.
    +	Status *string `locationName:"status" type:"string"`
    +
    +	// A descriptive status message for the import image task.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +}
    +
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportImageTask) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportImageTask) GoString() string {
+	return s.String()
+}
+
+// SetArchitecture sets the Architecture field's value.
+func (s *ImportImageTask) SetArchitecture(v string) *ImportImageTask {
+	s.Architecture = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportImageTask) SetDescription(v string) *ImportImageTask {
+	s.Description = &v
+	return s
+}
+
+// SetHypervisor sets the Hypervisor field's value.
+func (s *ImportImageTask) SetHypervisor(v string) *ImportImageTask {
+	s.Hypervisor = &v
+	return s
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *ImportImageTask) SetImageId(v string) *ImportImageTask {
+	s.ImageId = &v
+	return s
+}
+
+// SetImportTaskId sets the ImportTaskId field's value.
+func (s *ImportImageTask) SetImportTaskId(v string) *ImportImageTask {
+	s.ImportTaskId = &v
+	return s
+}
+
+// SetLicenseType sets the LicenseType field's value.
+func (s *ImportImageTask) SetLicenseType(v string) *ImportImageTask {
+	s.LicenseType = &v
+	return s
+}
+
+// SetPlatform sets the Platform field's value.
+func (s *ImportImageTask) SetPlatform(v string) *ImportImageTask {
+	s.Platform = &v
+	return s
+}
+
+// SetProgress sets the Progress field's value.
+func (s *ImportImageTask) SetProgress(v string) *ImportImageTask {
+	s.Progress = &v
+	return s
+}
+
+// SetSnapshotDetails sets the SnapshotDetails field's value.
+func (s *ImportImageTask) SetSnapshotDetails(v []*SnapshotDetail) *ImportImageTask {
+	s.SnapshotDetails = v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ImportImageTask) SetStatus(v string) *ImportImageTask {
+	s.Status = &v
+	return s
+}
+
+// SetStatusMessage sets the StatusMessage field's value.
+func (s *ImportImageTask) SetStatusMessage(v string) *ImportImageTask {
+	s.StatusMessage = &v
+	return s
+}
    +
+// ImportInstanceInput contains the parameters for the ImportInstance operation.
+type ImportInstanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description for the instance being imported.
+	Description *string `locationName:"description" type:"string"`
+
+	// The disk image.
+	DiskImages []*DiskImage `locationName:"diskImage" type:"list"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The launch specification.
+	LaunchSpecification *ImportInstanceLaunchSpecification `locationName:"launchSpecification" type:"structure"`
+
+	// The instance operating system.
+	//
+	// Platform is a required field
+	Platform *string `locationName:"platform" type:"string" required:"true" enum:"PlatformValues"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportInstanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportInstanceInput) GoString() string {
+	return s.String()
+}
+
+// Validate checks that the required Platform field is set and validates each non-nil DiskImage; it returns a request.ErrInvalidParams describing every problem found, or nil.
+func (s *ImportInstanceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ImportInstanceInput"}
+	if s.Platform == nil {
+		invalidParams.Add(request.NewErrParamRequired("Platform"))
+	}
+	if s.DiskImages != nil {
+		for i, v := range s.DiskImages {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DiskImages", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportInstanceInput) SetDescription(v string) *ImportInstanceInput {
+	s.Description = &v
+	return s
+}
+
+// SetDiskImages sets the DiskImages field's value.
+func (s *ImportInstanceInput) SetDiskImages(v []*DiskImage) *ImportInstanceInput {
+	s.DiskImages = v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ImportInstanceInput) SetDryRun(v bool) *ImportInstanceInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetLaunchSpecification sets the LaunchSpecification field's value.
+func (s *ImportInstanceInput) SetLaunchSpecification(v *ImportInstanceLaunchSpecification) *ImportInstanceInput {
+	s.LaunchSpecification = v
+	return s
+}
+
+// SetPlatform sets the Platform field's value.
+func (s *ImportInstanceInput) SetPlatform(v string) *ImportInstanceInput {
+	s.Platform = &v
+	return s
+}
    +
+// ImportInstanceLaunchSpecification describes the launch specification for VM import.
+type ImportInstanceLaunchSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Reserved.
+	AdditionalInfo *string `locationName:"additionalInfo" type:"string"`
+
+	// The architecture of the instance.
+	Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
+
+	// One or more security group IDs.
+	GroupIds []*string `locationName:"GroupId" locationNameList:"SecurityGroupId" type:"list"`
+
+	// One or more security group names.
+	GroupNames []*string `locationName:"GroupName" locationNameList:"SecurityGroup" type:"list"`
+
+	// Indicates whether an instance stops or terminates when you initiate shutdown
+	// from the instance (using the operating system command for system shutdown).
+	InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"`
+
+	// The instance type. For more information about the instance types that you
+	// can import, see Instance Types (http://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#vmimport-instance-types)
+	// in the VM Import/Export User Guide.
+	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
+
+	// Indicates whether monitoring is enabled.
+	Monitoring *bool `locationName:"monitoring" type:"boolean"`
+
+	// The placement information for the instance.
+	Placement *Placement `locationName:"placement" type:"structure"`
+
+	// [EC2-VPC] An available IP address from the IP address range of the subnet.
+	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+
+	// [EC2-VPC] The ID of the subnet in which to launch the instance.
+	SubnetId *string `locationName:"subnetId" type:"string"`
+
+	// The user data to make available to the instance. If you are using an AWS
+	// SDK or command line tool, Base64-encoding is performed for you, and you can
+	// load the text from a file. Otherwise, you must provide Base64-encoded text.
+	UserData *UserData `locationName:"userData" type:"structure"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportInstanceLaunchSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportInstanceLaunchSpecification) GoString() string {
+	return s.String()
+}
+
+// SetAdditionalInfo sets the AdditionalInfo field's value.
+func (s *ImportInstanceLaunchSpecification) SetAdditionalInfo(v string) *ImportInstanceLaunchSpecification {
+	s.AdditionalInfo = &v
+	return s
+}
+
+// SetArchitecture sets the Architecture field's value.
+func (s *ImportInstanceLaunchSpecification) SetArchitecture(v string) *ImportInstanceLaunchSpecification {
+	s.Architecture = &v
+	return s
+}
+
+// SetGroupIds sets the GroupIds field's value.
+func (s *ImportInstanceLaunchSpecification) SetGroupIds(v []*string) *ImportInstanceLaunchSpecification {
+	s.GroupIds = v
+	return s
+}
+
+// SetGroupNames sets the GroupNames field's value.
+func (s *ImportInstanceLaunchSpecification) SetGroupNames(v []*string) *ImportInstanceLaunchSpecification {
+	s.GroupNames = v
+	return s
+}
+
+// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value.
+func (s *ImportInstanceLaunchSpecification) SetInstanceInitiatedShutdownBehavior(v string) *ImportInstanceLaunchSpecification {
+	s.InstanceInitiatedShutdownBehavior = &v
+	return s
+}
+
+// SetInstanceType sets the InstanceType field's value.
+func (s *ImportInstanceLaunchSpecification) SetInstanceType(v string) *ImportInstanceLaunchSpecification {
+	s.InstanceType = &v
+	return s
+}
+
+// SetMonitoring sets the Monitoring field's value.
+func (s *ImportInstanceLaunchSpecification) SetMonitoring(v bool) *ImportInstanceLaunchSpecification {
+	s.Monitoring = &v
+	return s
+}
+
+// SetPlacement sets the Placement field's value.
+func (s *ImportInstanceLaunchSpecification) SetPlacement(v *Placement) *ImportInstanceLaunchSpecification {
+	s.Placement = v
+	return s
+}
+
+// SetPrivateIpAddress sets the PrivateIpAddress field's value.
+func (s *ImportInstanceLaunchSpecification) SetPrivateIpAddress(v string) *ImportInstanceLaunchSpecification {
+	s.PrivateIpAddress = &v
+	return s
+}
+
+// SetSubnetId sets the SubnetId field's value.
+func (s *ImportInstanceLaunchSpecification) SetSubnetId(v string) *ImportInstanceLaunchSpecification {
+	s.SubnetId = &v
+	return s
+}
+
+// SetUserData sets the UserData field's value.
+func (s *ImportInstanceLaunchSpecification) SetUserData(v *UserData) *ImportInstanceLaunchSpecification {
+	s.UserData = v
+	return s
+}
    +
+// ImportInstanceOutput contains the output of the ImportInstance operation.
+type ImportInstanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the conversion task.
+	ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportInstanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportInstanceOutput) GoString() string {
+	return s.String()
+}
+
+// SetConversionTask sets the ConversionTask field's value.
+func (s *ImportInstanceOutput) SetConversionTask(v *ConversionTask) *ImportInstanceOutput {
+	s.ConversionTask = v
+	return s
+}
    +
+// ImportInstanceTaskDetails describes an import instance task.
+type ImportInstanceTaskDetails struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the task.
+	Description *string `locationName:"description" type:"string"`
+
+	// The ID of the instance.
+	InstanceId *string `locationName:"instanceId" type:"string"`
+
+	// The instance operating system.
+	Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"`
+
+	// One or more volumes.
+	//
+	// Volumes is a required field
+	Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list" required:"true"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportInstanceTaskDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportInstanceTaskDetails) GoString() string {
+	return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportInstanceTaskDetails) SetDescription(v string) *ImportInstanceTaskDetails {
+	s.Description = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *ImportInstanceTaskDetails) SetInstanceId(v string) *ImportInstanceTaskDetails {
+	s.InstanceId = &v
+	return s
+}
+
+// SetPlatform sets the Platform field's value.
+func (s *ImportInstanceTaskDetails) SetPlatform(v string) *ImportInstanceTaskDetails {
+	s.Platform = &v
+	return s
+}
+
+// SetVolumes sets the Volumes field's value.
+func (s *ImportInstanceTaskDetails) SetVolumes(v []*ImportInstanceVolumeDetailItem) *ImportInstanceTaskDetails {
+	s.Volumes = v
+	return s
+}
    +
+// ImportInstanceVolumeDetailItem describes a single volume being converted as part of an import instance task.
+type ImportInstanceVolumeDetailItem struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zone where the resulting instance will reside.
+	//
+	// AvailabilityZone is a required field
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+	// The number of bytes converted so far.
+	//
+	// BytesConverted is a required field
+	BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"`
+
+	// A description of the task.
+	Description *string `locationName:"description" type:"string"`
+
+	// The image.
+	//
+	// Image is a required field
+	Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"`
+
+	// The status of the import of this particular disk image.
+	//
+	// Status is a required field
+	Status *string `locationName:"status" type:"string" required:"true"`
+
+	// The status information or errors related to the disk image.
+	StatusMessage *string `locationName:"statusMessage" type:"string"`
+
+	// The volume.
+	//
+	// Volume is a required field
+	Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportInstanceVolumeDetailItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportInstanceVolumeDetailItem) GoString() string {
+	return s.String()
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *ImportInstanceVolumeDetailItem) SetAvailabilityZone(v string) *ImportInstanceVolumeDetailItem {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetBytesConverted sets the BytesConverted field's value.
+func (s *ImportInstanceVolumeDetailItem) SetBytesConverted(v int64) *ImportInstanceVolumeDetailItem {
+	s.BytesConverted = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportInstanceVolumeDetailItem) SetDescription(v string) *ImportInstanceVolumeDetailItem {
+	s.Description = &v
+	return s
+}
+
+// SetImage sets the Image field's value.
+func (s *ImportInstanceVolumeDetailItem) SetImage(v *DiskImageDescription) *ImportInstanceVolumeDetailItem {
+	s.Image = v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ImportInstanceVolumeDetailItem) SetStatus(v string) *ImportInstanceVolumeDetailItem {
+	s.Status = &v
+	return s
+}
+
+// SetStatusMessage sets the StatusMessage field's value.
+func (s *ImportInstanceVolumeDetailItem) SetStatusMessage(v string) *ImportInstanceVolumeDetailItem {
+	s.StatusMessage = &v
+	return s
+}
+
+// SetVolume sets the Volume field's value.
+func (s *ImportInstanceVolumeDetailItem) SetVolume(v *DiskImageVolumeDescription) *ImportInstanceVolumeDetailItem {
+	s.Volume = v
+	return s
+}
    +
+// ImportKeyPairInput contains the parameters for the ImportKeyPair operation.
+type ImportKeyPairInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// A unique name for the key pair.
+	//
+	// KeyName is a required field
+	KeyName *string `locationName:"keyName" type:"string" required:"true"`
+
+	// The public key. For API calls, the text must be base64-encoded. For command
+	// line tools, base64 encoding is performed for you.
+	//
+	// PublicKeyMaterial is automatically base64 encoded/decoded by the SDK.
+	//
+	// PublicKeyMaterial is a required field
+	PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportKeyPairInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportKeyPairInput) GoString() string {
+	return s.String()
+}
+
+// Validate checks that the required KeyName and PublicKeyMaterial fields are set; it returns a request.ErrInvalidParams listing any missing fields, or nil.
+func (s *ImportKeyPairInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ImportKeyPairInput"}
+	if s.KeyName == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeyName"))
+	}
+	if s.PublicKeyMaterial == nil {
+		invalidParams.Add(request.NewErrParamRequired("PublicKeyMaterial"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ImportKeyPairInput) SetDryRun(v bool) *ImportKeyPairInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetKeyName sets the KeyName field's value.
+func (s *ImportKeyPairInput) SetKeyName(v string) *ImportKeyPairInput {
+	s.KeyName = &v
+	return s
+}
+
+// SetPublicKeyMaterial sets the PublicKeyMaterial field's value.
+func (s *ImportKeyPairInput) SetPublicKeyMaterial(v []byte) *ImportKeyPairInput {
+	s.PublicKeyMaterial = v
+	return s
+}
    +
+// ImportKeyPairOutput contains the output of the ImportKeyPair operation.
+type ImportKeyPairOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The MD5 public key fingerprint as specified in section 4 of RFC 4716.
+	KeyFingerprint *string `locationName:"keyFingerprint" type:"string"`
+
+	// The key pair name you provided.
+	KeyName *string `locationName:"keyName" type:"string"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportKeyPairOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportKeyPairOutput) GoString() string {
+	return s.String()
+}
+
+// SetKeyFingerprint sets the KeyFingerprint field's value.
+func (s *ImportKeyPairOutput) SetKeyFingerprint(v string) *ImportKeyPairOutput {
+	s.KeyFingerprint = &v
+	return s
+}
+
+// SetKeyName sets the KeyName field's value.
+func (s *ImportKeyPairOutput) SetKeyName(v string) *ImportKeyPairOutput {
+	s.KeyName = &v
+	return s
+}
    +
+// ImportSnapshotInput contains the parameters for the ImportSnapshot operation.
+type ImportSnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// The client-specific data.
+	ClientData *ClientData `type:"structure"`
+
+	// Token to enable idempotency for VM import requests.
+	ClientToken *string `type:"string"`
+
+	// The description string for the import snapshot task.
+	Description *string `type:"string"`
+
+	// Information about the disk container.
+	DiskContainer *SnapshotDiskContainer `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// The name of the role to use when not using the default role, 'vmimport'.
+	RoleName *string `type:"string"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportSnapshotInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportSnapshotInput) GoString() string {
+	return s.String()
+}
+
+// SetClientData sets the ClientData field's value.
+func (s *ImportSnapshotInput) SetClientData(v *ClientData) *ImportSnapshotInput {
+	s.ClientData = v
+	return s
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ImportSnapshotInput) SetClientToken(v string) *ImportSnapshotInput {
+	s.ClientToken = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportSnapshotInput) SetDescription(v string) *ImportSnapshotInput {
+	s.Description = &v
+	return s
+}
+
+// SetDiskContainer sets the DiskContainer field's value.
+func (s *ImportSnapshotInput) SetDiskContainer(v *SnapshotDiskContainer) *ImportSnapshotInput {
+	s.DiskContainer = v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ImportSnapshotInput) SetDryRun(v bool) *ImportSnapshotInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *ImportSnapshotInput) SetRoleName(v string) *ImportSnapshotInput {
+	s.RoleName = &v
+	return s
+}
    +
+// ImportSnapshotOutput contains the output of the ImportSnapshot operation.
+type ImportSnapshotOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the import snapshot task.
+	Description *string `locationName:"description" type:"string"`
+
+	// The ID of the import snapshot task.
+	ImportTaskId *string `locationName:"importTaskId" type:"string"`
+
+	// Information about the import snapshot task.
+	SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportSnapshotOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportSnapshotOutput) GoString() string {
+	return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportSnapshotOutput) SetDescription(v string) *ImportSnapshotOutput {
+	s.Description = &v
+	return s
+}
+
+// SetImportTaskId sets the ImportTaskId field's value.
+func (s *ImportSnapshotOutput) SetImportTaskId(v string) *ImportSnapshotOutput {
+	s.ImportTaskId = &v
+	return s
+}
+
+// SetSnapshotTaskDetail sets the SnapshotTaskDetail field's value.
+func (s *ImportSnapshotOutput) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *ImportSnapshotOutput {
+	s.SnapshotTaskDetail = v
+	return s
+}
    +
+// ImportSnapshotTask describes an import snapshot task.
+type ImportSnapshotTask struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the import snapshot task.
+	Description *string `locationName:"description" type:"string"`
+
+	// The ID of the import snapshot task.
+	ImportTaskId *string `locationName:"importTaskId" type:"string"`
+
+	// Describes an import snapshot task.
+	SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportSnapshotTask) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportSnapshotTask) GoString() string {
+	return s.String()
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportSnapshotTask) SetDescription(v string) *ImportSnapshotTask {
+	s.Description = &v
+	return s
+}
+
+// SetImportTaskId sets the ImportTaskId field's value.
+func (s *ImportSnapshotTask) SetImportTaskId(v string) *ImportSnapshotTask {
+	s.ImportTaskId = &v
+	return s
+}
+
+// SetSnapshotTaskDetail sets the SnapshotTaskDetail field's value.
+func (s *ImportSnapshotTask) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *ImportSnapshotTask {
+	s.SnapshotTaskDetail = v
+	return s
+}
    +
+// ImportVolumeInput contains the parameters for the ImportVolume operation.
+type ImportVolumeInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zone for the resulting EBS volume.
+	//
+	// AvailabilityZone is a required field
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+	// A description of the volume.
+	Description *string `locationName:"description" type:"string"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The disk image.
+	//
+	// Image is a required field
+	Image *DiskImageDetail `locationName:"image" type:"structure" required:"true"`
+
+	// The volume size.
+	//
+	// Volume is a required field
+	Volume *VolumeDetail `locationName:"volume" type:"structure" required:"true"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportVolumeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportVolumeInput) GoString() string {
+	return s.String()
+}
+
+// Validate checks that the required AvailabilityZone, Image, and Volume fields are set and validates the nested Image and Volume; it returns a request.ErrInvalidParams describing every problem found, or nil.
+func (s *ImportVolumeInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ImportVolumeInput"}
+	if s.AvailabilityZone == nil {
+		invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
+	}
+	if s.Image == nil {
+		invalidParams.Add(request.NewErrParamRequired("Image"))
+	}
+	if s.Volume == nil {
+		invalidParams.Add(request.NewErrParamRequired("Volume"))
+	}
+	if s.Image != nil {
+		if err := s.Image.Validate(); err != nil {
+			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Volume != nil {
+		if err := s.Volume.Validate(); err != nil {
+			invalidParams.AddNested("Volume", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *ImportVolumeInput) SetAvailabilityZone(v string) *ImportVolumeInput {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportVolumeInput) SetDescription(v string) *ImportVolumeInput {
+	s.Description = &v
+	return s
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ImportVolumeInput) SetDryRun(v bool) *ImportVolumeInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetImage sets the Image field's value.
+func (s *ImportVolumeInput) SetImage(v *DiskImageDetail) *ImportVolumeInput {
+	s.Image = v
+	return s
+}
+
+// SetVolume sets the Volume field's value.
+func (s *ImportVolumeInput) SetVolume(v *VolumeDetail) *ImportVolumeInput {
+	s.Volume = v
+	return s
+}
    +
+// ImportVolumeOutput contains the output of the ImportVolume operation.
+type ImportVolumeOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the conversion task.
+	ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportVolumeOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportVolumeOutput) GoString() string {
+	return s.String()
+}
+
+// SetConversionTask sets the ConversionTask field's value.
+func (s *ImportVolumeOutput) SetConversionTask(v *ConversionTask) *ImportVolumeOutput {
+	s.ConversionTask = v
+	return s
+}
    +
+// ImportVolumeTaskDetails describes an import volume task.
+type ImportVolumeTaskDetails struct {
+	_ struct{} `type:"structure"`
+
+	// The Availability Zone where the resulting volume will reside.
+	//
+	// AvailabilityZone is a required field
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
+
+	// The number of bytes converted so far.
+	//
+	// BytesConverted is a required field
+	BytesConverted *int64 `locationName:"bytesConverted" type:"long" required:"true"`
+
+	// The description you provided when starting the import volume task.
+	Description *string `locationName:"description" type:"string"`
+
+	// The image.
+	//
+	// Image is a required field
+	Image *DiskImageDescription `locationName:"image" type:"structure" required:"true"`
+
+	// The volume.
+	//
+	// Volume is a required field
+	Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure" required:"true"`
+}
+
+// String returns a human-readable representation of the value, produced by awsutil.Prettify.
+func (s ImportVolumeTaskDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString implements fmt.GoStringer; it returns the same text as String.
+func (s ImportVolumeTaskDetails) GoString() string {
+	return s.String()
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *ImportVolumeTaskDetails) SetAvailabilityZone(v string) *ImportVolumeTaskDetails {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetBytesConverted sets the BytesConverted field's value.
+func (s *ImportVolumeTaskDetails) SetBytesConverted(v int64) *ImportVolumeTaskDetails {
+	s.BytesConverted = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *ImportVolumeTaskDetails) SetDescription(v string) *ImportVolumeTaskDetails {
+	s.Description = &v
+	return s
+}
+
+// SetImage sets the Image field's value.
+func (s *ImportVolumeTaskDetails) SetImage(v *DiskImageDescription) *ImportVolumeTaskDetails {
+	s.Image = v
+	return s
+}
+
+// SetVolume sets the Volume field's value.
+func (s *ImportVolumeTaskDetails) SetVolume(v *DiskImageVolumeDescription) *ImportVolumeTaskDetails {
+	s.Volume = v
+	return s
+}
    +
    +// Describes an instance.
    +type Instance struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The AMI launch index, which can be used to find this instance in the launch
    +	// group.
    +	AmiLaunchIndex *int64 `locationName:"amiLaunchIndex" type:"integer"`
    +
    +	// The architecture of the image.
    +	Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
    +
    +	// Any block device mapping entries for the instance.
    +	BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
    +
    +	// The idempotency token you provided when you launched the instance, if applicable.
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// Indicates whether the instance is optimized for EBS I/O. This optimization
    +	// provides dedicated throughput to Amazon EBS and an optimized configuration
    +	// stack to provide optimal I/O performance. This optimization isn't available
    +	// with all instance types. Additional usage charges apply when using an EBS
    +	// Optimized instance.
    +	EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
    +
    +	// Specifies whether enhanced networking with ENA is enabled.
    +	EnaSupport *bool `locationName:"enaSupport" type:"boolean"`
    +
    +	// The hypervisor type of the instance.
    +	Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"`
    +
    +	// The IAM instance profile associated with the instance, if applicable.
    +	IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"`
    +
    +	// The ID of the AMI used to launch the instance.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// Indicates whether this is a Spot instance or a Scheduled Instance.
    +	InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"`
    +
    +	// The instance type.
    +	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
    +
    +	// The kernel associated with this instance, if applicable.
    +	KernelId *string `locationName:"kernelId" type:"string"`
    +
    +	// The name of the key pair, if this instance was launched with an associated
    +	// key pair.
    +	KeyName *string `locationName:"keyName" type:"string"`
    +
    +	// The time the instance was launched.
    +	LaunchTime *time.Time `locationName:"launchTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The monitoring information for the instance.
    +	Monitoring *Monitoring `locationName:"monitoring" type:"structure"`
    +
    +	// [EC2-VPC] One or more network interfaces for the instance.
    +	NetworkInterfaces []*InstanceNetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
    +
    +	// The location where the instance launched, if applicable.
    +	Placement *Placement `locationName:"placement" type:"structure"`
    +
    +	// The value is Windows for Windows instances; otherwise blank.
    +	Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"`
    +
    +	// The private DNS name assigned to the instance. This DNS name can only be
    +	// used inside the Amazon EC2 network. This name is not available until the
    +	// instance enters the running state. For EC2-VPC, this name is only available
    +	// if you've enabled DNS hostnames for your VPC.
    +	PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
    +
    +	// The private IP address assigned to the instance.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// The product codes attached to this instance, if applicable.
    +	ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`
    +
    +	// The public DNS name assigned to the instance. This name is not available
    +	// until the instance enters the running state. For EC2-VPC, this name is only
    +	// available if you've enabled DNS hostnames for your VPC.
    +	PublicDnsName *string `locationName:"dnsName" type:"string"`
    +
    +	// The public IP address assigned to the instance, if applicable.
    +	PublicIpAddress *string `locationName:"ipAddress" type:"string"`
    +
    +	// The RAM disk associated with this instance, if applicable.
    +	RamdiskId *string `locationName:"ramdiskId" type:"string"`
    +
    +	// The root device name (for example, /dev/sda1 or /dev/xvda).
    +	RootDeviceName *string `locationName:"rootDeviceName" type:"string"`
    +
    +	// The root device type used by the AMI. The AMI can use an EBS volume or an
    +	// instance store volume.
    +	RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"`
    +
    +	// One or more security groups for the instance.
    +	SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// Specifies whether to enable an instance launched in a VPC to perform NAT.
    +	// This controls whether source/destination checking is enabled on the instance.
    +	// A value of true means checking is enabled, and false means checking is disabled.
    +	// The value must be false for the instance to perform NAT. For more information,
    +	// see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html)
    +	// in the Amazon Virtual Private Cloud User Guide.
    +	SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"`
    +
    +	// If the request is a Spot instance request, the ID of the request.
    +	SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"`
    +
    +	// Specifies whether enhanced networking with the Intel 82599 Virtual Function
    +	// interface is enabled.
    +	SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"`
    +
    +	// The current state of the instance.
    +	State *InstanceState `locationName:"instanceState" type:"structure"`
    +
    +	// The reason for the most recent state transition.
    +	StateReason *StateReason `locationName:"stateReason" type:"structure"`
    +
    +	// The reason for the most recent state transition. This might be an empty string.
    +	StateTransitionReason *string `locationName:"reason" type:"string"`
    +
    +	// [EC2-VPC] The ID of the subnet in which the instance is running.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +
    +	// Any tags assigned to the instance.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The virtualization type of the instance.
    +	VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"`
    +
    +	// [EC2-VPC] The ID of the VPC in which the instance is running.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Instance) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Instance) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAmiLaunchIndex sets the AmiLaunchIndex field's value.
    +func (s *Instance) SetAmiLaunchIndex(v int64) *Instance {
    +	s.AmiLaunchIndex = &v
    +	return s
    +}
    +
    +// SetArchitecture sets the Architecture field's value.
    +func (s *Instance) SetArchitecture(v string) *Instance {
    +	s.Architecture = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *Instance) SetBlockDeviceMappings(v []*InstanceBlockDeviceMapping) *Instance {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *Instance) SetClientToken(v string) *Instance {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *Instance) SetEbsOptimized(v bool) *Instance {
    +	s.EbsOptimized = &v
    +	return s
    +}
    +
    +// SetEnaSupport sets the EnaSupport field's value.
    +func (s *Instance) SetEnaSupport(v bool) *Instance {
    +	s.EnaSupport = &v
    +	return s
    +}
    +
    +// SetHypervisor sets the Hypervisor field's value.
    +func (s *Instance) SetHypervisor(v string) *Instance {
    +	s.Hypervisor = &v
    +	return s
    +}
    +
    +// SetIamInstanceProfile sets the IamInstanceProfile field's value.
    +func (s *Instance) SetIamInstanceProfile(v *IamInstanceProfile) *Instance {
    +	s.IamInstanceProfile = v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *Instance) SetImageId(v string) *Instance {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *Instance) SetInstanceId(v string) *Instance {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceLifecycle sets the InstanceLifecycle field's value.
    +func (s *Instance) SetInstanceLifecycle(v string) *Instance {
    +	s.InstanceLifecycle = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *Instance) SetInstanceType(v string) *Instance {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *Instance) SetKernelId(v string) *Instance {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *Instance) SetKeyName(v string) *Instance {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// SetLaunchTime sets the LaunchTime field's value.
    +func (s *Instance) SetLaunchTime(v time.Time) *Instance {
    +	s.LaunchTime = &v
    +	return s
    +}
    +
    +// SetMonitoring sets the Monitoring field's value.
    +func (s *Instance) SetMonitoring(v *Monitoring) *Instance {
    +	s.Monitoring = v
    +	return s
    +}
    +
    +// SetNetworkInterfaces sets the NetworkInterfaces field's value.
    +func (s *Instance) SetNetworkInterfaces(v []*InstanceNetworkInterface) *Instance {
    +	s.NetworkInterfaces = v
    +	return s
    +}
    +
    +// SetPlacement sets the Placement field's value.
    +func (s *Instance) SetPlacement(v *Placement) *Instance {
    +	s.Placement = v
    +	return s
    +}
    +
    +// SetPlatform sets the Platform field's value.
    +func (s *Instance) SetPlatform(v string) *Instance {
    +	s.Platform = &v
    +	return s
    +}
    +
    +// SetPrivateDnsName sets the PrivateDnsName field's value.
    +func (s *Instance) SetPrivateDnsName(v string) *Instance {
    +	s.PrivateDnsName = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *Instance) SetPrivateIpAddress(v string) *Instance {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetProductCodes sets the ProductCodes field's value.
    +func (s *Instance) SetProductCodes(v []*ProductCode) *Instance {
    +	s.ProductCodes = v
    +	return s
    +}
    +
    +// SetPublicDnsName sets the PublicDnsName field's value.
    +func (s *Instance) SetPublicDnsName(v string) *Instance {
    +	s.PublicDnsName = &v
    +	return s
    +}
    +
    +// SetPublicIpAddress sets the PublicIpAddress field's value.
    +func (s *Instance) SetPublicIpAddress(v string) *Instance {
    +	s.PublicIpAddress = &v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *Instance) SetRamdiskId(v string) *Instance {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetRootDeviceName sets the RootDeviceName field's value.
    +func (s *Instance) SetRootDeviceName(v string) *Instance {
    +	s.RootDeviceName = &v
    +	return s
    +}
    +
    +// SetRootDeviceType sets the RootDeviceType field's value.
    +func (s *Instance) SetRootDeviceType(v string) *Instance {
    +	s.RootDeviceType = &v
    +	return s
    +}
    +
    +// SetSecurityGroups sets the SecurityGroups field's value.
    +func (s *Instance) SetSecurityGroups(v []*GroupIdentifier) *Instance {
    +	s.SecurityGroups = v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *Instance) SetSourceDestCheck(v bool) *Instance {
    +	s.SourceDestCheck = &v
    +	return s
    +}
    +
    +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value.
    +func (s *Instance) SetSpotInstanceRequestId(v string) *Instance {
    +	s.SpotInstanceRequestId = &v
    +	return s
    +}
    +
    +// SetSriovNetSupport sets the SriovNetSupport field's value.
    +func (s *Instance) SetSriovNetSupport(v string) *Instance {
    +	s.SriovNetSupport = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Instance) SetState(v *InstanceState) *Instance {
    +	s.State = v
    +	return s
    +}
    +
    +// SetStateReason sets the StateReason field's value.
    +func (s *Instance) SetStateReason(v *StateReason) *Instance {
    +	s.StateReason = v
    +	return s
    +}
    +
    +// SetStateTransitionReason sets the StateTransitionReason field's value.
    +func (s *Instance) SetStateTransitionReason(v string) *Instance {
    +	s.StateTransitionReason = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *Instance) SetSubnetId(v string) *Instance {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *Instance) SetTags(v []*Tag) *Instance {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVirtualizationType sets the VirtualizationType field's value.
    +func (s *Instance) SetVirtualizationType(v string) *Instance {
    +	s.VirtualizationType = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *Instance) SetVpcId(v string) *Instance {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a block device mapping.
    +type InstanceBlockDeviceMapping struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The device name exposed to the instance (for example, /dev/sdh or xvdh).
    +	DeviceName *string `locationName:"deviceName" type:"string"`
    +
    +	// Parameters used to automatically set up EBS volumes when the instance is
    +	// launched.
    +	Ebs *EbsInstanceBlockDevice `locationName:"ebs" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceBlockDeviceMapping) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceBlockDeviceMapping) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDeviceName sets the DeviceName field's value.
    +func (s *InstanceBlockDeviceMapping) SetDeviceName(v string) *InstanceBlockDeviceMapping {
    +	s.DeviceName = &v
    +	return s
    +}
    +
    +// SetEbs sets the Ebs field's value.
    +func (s *InstanceBlockDeviceMapping) SetEbs(v *EbsInstanceBlockDevice) *InstanceBlockDeviceMapping {
    +	s.Ebs = v
    +	return s
    +}
    +
    +// Describes a block device mapping entry.
    +type InstanceBlockDeviceMappingSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The device name exposed to the instance (for example, /dev/sdh or xvdh).
    +	DeviceName *string `locationName:"deviceName" type:"string"`
    +
    +	// Parameters used to automatically set up EBS volumes when the instance is
    +	// launched.
    +	Ebs *EbsInstanceBlockDeviceSpecification `locationName:"ebs" type:"structure"`
    +
+	// Suppresses the specified device included in the block device mapping.
    +	NoDevice *string `locationName:"noDevice" type:"string"`
    +
    +	// The virtual device name.
    +	VirtualName *string `locationName:"virtualName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceBlockDeviceMappingSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceBlockDeviceMappingSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDeviceName sets the DeviceName field's value.
    +func (s *InstanceBlockDeviceMappingSpecification) SetDeviceName(v string) *InstanceBlockDeviceMappingSpecification {
    +	s.DeviceName = &v
    +	return s
    +}
    +
    +// SetEbs sets the Ebs field's value.
    +func (s *InstanceBlockDeviceMappingSpecification) SetEbs(v *EbsInstanceBlockDeviceSpecification) *InstanceBlockDeviceMappingSpecification {
    +	s.Ebs = v
    +	return s
    +}
    +
    +// SetNoDevice sets the NoDevice field's value.
    +func (s *InstanceBlockDeviceMappingSpecification) SetNoDevice(v string) *InstanceBlockDeviceMappingSpecification {
    +	s.NoDevice = &v
    +	return s
    +}
    +
    +// SetVirtualName sets the VirtualName field's value.
    +func (s *InstanceBlockDeviceMappingSpecification) SetVirtualName(v string) *InstanceBlockDeviceMappingSpecification {
    +	s.VirtualName = &v
    +	return s
    +}
    +
    +// Information about the instance type that the Dedicated Host supports.
    +type InstanceCapacity struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of instances that can still be launched onto the Dedicated Host.
    +	AvailableCapacity *int64 `locationName:"availableCapacity" type:"integer"`
    +
    +	// The instance type size supported by the Dedicated Host.
    +	InstanceType *string `locationName:"instanceType" type:"string"`
    +
    +	// The total number of instances that can be launched onto the Dedicated Host.
    +	TotalCapacity *int64 `locationName:"totalCapacity" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceCapacity) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceCapacity) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailableCapacity sets the AvailableCapacity field's value.
    +func (s *InstanceCapacity) SetAvailableCapacity(v int64) *InstanceCapacity {
    +	s.AvailableCapacity = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *InstanceCapacity) SetInstanceType(v string) *InstanceCapacity {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetTotalCapacity sets the TotalCapacity field's value.
    +func (s *InstanceCapacity) SetTotalCapacity(v int64) *InstanceCapacity {
    +	s.TotalCapacity = &v
    +	return s
    +}
    +
    +// Describes a Reserved Instance listing state.
    +type InstanceCount struct {
    +	_ struct{} `type:"structure"`
    +
+	// The number of listed Reserved Instances in the specified state.
    +	InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
    +
    +	// The states of the listed Reserved Instances.
    +	State *string `locationName:"state" type:"string" enum:"ListingState"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceCount) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceCount) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *InstanceCount) SetInstanceCount(v int64) *InstanceCount {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *InstanceCount) SetState(v string) *InstanceCount {
    +	s.State = &v
    +	return s
    +}
    +
    +// Describes an instance to export.
    +type InstanceExportDetails struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the resource being exported.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The target virtualization environment.
    +	TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceExportDetails) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceExportDetails) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *InstanceExportDetails) SetInstanceId(v string) *InstanceExportDetails {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetTargetEnvironment sets the TargetEnvironment field's value.
    +func (s *InstanceExportDetails) SetTargetEnvironment(v string) *InstanceExportDetails {
    +	s.TargetEnvironment = &v
    +	return s
    +}
    +
    +// Describes the monitoring information of the instance.
    +type InstanceMonitoring struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The monitoring information.
    +	Monitoring *Monitoring `locationName:"monitoring" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceMonitoring) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceMonitoring) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *InstanceMonitoring) SetInstanceId(v string) *InstanceMonitoring {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetMonitoring sets the Monitoring field's value.
    +func (s *InstanceMonitoring) SetMonitoring(v *Monitoring) *InstanceMonitoring {
    +	s.Monitoring = v
    +	return s
    +}
    +
    +// Describes a network interface.
    +type InstanceNetworkInterface struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The association information for an Elastic IP associated with the network
    +	// interface.
    +	Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"`
    +
    +	// The network interface attachment.
    +	Attachment *InstanceNetworkInterfaceAttachment `locationName:"attachment" type:"structure"`
    +
    +	// The description.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// One or more security groups.
    +	Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// The MAC address.
    +	MacAddress *string `locationName:"macAddress" type:"string"`
    +
    +	// The ID of the network interface.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// The ID of the AWS account that created the network interface.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// The private DNS name.
    +	PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
    +
    +	// The IP address of the network interface within the subnet.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// The private IP addresses associated with the network interface.
    +	PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"`
    +
    +	// Indicates whether to validate network traffic to or from this network interface.
    +	SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"`
    +
    +	// The status of the network interface.
    +	Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"`
    +
    +	// The ID of the subnet.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceNetworkInterface) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceNetworkInterface) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociation sets the Association field's value.
    +func (s *InstanceNetworkInterface) SetAssociation(v *InstanceNetworkInterfaceAssociation) *InstanceNetworkInterface {
    +	s.Association = v
    +	return s
    +}
    +
    +// SetAttachment sets the Attachment field's value.
    +func (s *InstanceNetworkInterface) SetAttachment(v *InstanceNetworkInterfaceAttachment) *InstanceNetworkInterface {
    +	s.Attachment = v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *InstanceNetworkInterface) SetDescription(v string) *InstanceNetworkInterface {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *InstanceNetworkInterface) SetGroups(v []*GroupIdentifier) *InstanceNetworkInterface {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetMacAddress sets the MacAddress field's value.
    +func (s *InstanceNetworkInterface) SetMacAddress(v string) *InstanceNetworkInterface {
    +	s.MacAddress = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *InstanceNetworkInterface) SetNetworkInterfaceId(v string) *InstanceNetworkInterface {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *InstanceNetworkInterface) SetOwnerId(v string) *InstanceNetworkInterface {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetPrivateDnsName sets the PrivateDnsName field's value.
    +func (s *InstanceNetworkInterface) SetPrivateDnsName(v string) *InstanceNetworkInterface {
    +	s.PrivateDnsName = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *InstanceNetworkInterface) SetPrivateIpAddress(v string) *InstanceNetworkInterface {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
    +func (s *InstanceNetworkInterface) SetPrivateIpAddresses(v []*InstancePrivateIpAddress) *InstanceNetworkInterface {
    +	s.PrivateIpAddresses = v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *InstanceNetworkInterface) SetSourceDestCheck(v bool) *InstanceNetworkInterface {
    +	s.SourceDestCheck = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *InstanceNetworkInterface) SetStatus(v string) *InstanceNetworkInterface {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *InstanceNetworkInterface) SetSubnetId(v string) *InstanceNetworkInterface {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *InstanceNetworkInterface) SetVpcId(v string) *InstanceNetworkInterface {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes association information for an Elastic IP address.
    +type InstanceNetworkInterfaceAssociation struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the owner of the Elastic IP address.
    +	IpOwnerId *string `locationName:"ipOwnerId" type:"string"`
    +
    +	// The public DNS name.
    +	PublicDnsName *string `locationName:"publicDnsName" type:"string"`
    +
    +	// The public IP address or Elastic IP address bound to the network interface.
    +	PublicIp *string `locationName:"publicIp" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceNetworkInterfaceAssociation) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceNetworkInterfaceAssociation) GoString() string {
    +	return s.String()
    +}
    +
    +// SetIpOwnerId sets the IpOwnerId field's value.
    +func (s *InstanceNetworkInterfaceAssociation) SetIpOwnerId(v string) *InstanceNetworkInterfaceAssociation {
    +	s.IpOwnerId = &v
    +	return s
    +}
    +
    +// SetPublicDnsName sets the PublicDnsName field's value.
    +func (s *InstanceNetworkInterfaceAssociation) SetPublicDnsName(v string) *InstanceNetworkInterfaceAssociation {
    +	s.PublicDnsName = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *InstanceNetworkInterfaceAssociation) SetPublicIp(v string) *InstanceNetworkInterfaceAssociation {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Describes a network interface attachment.
    +type InstanceNetworkInterfaceAttachment struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The time stamp when the attachment initiated.
    +	AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The ID of the network interface attachment.
    +	AttachmentId *string `locationName:"attachmentId" type:"string"`
    +
    +	// Indicates whether the network interface is deleted when the instance is terminated.
    +	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
    +
    +	// The index of the device on the instance for the network interface attachment.
    +	DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"`
    +
    +	// The attachment state.
    +	Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceNetworkInterfaceAttachment) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceNetworkInterfaceAttachment) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachTime sets the AttachTime field's value.
    +func (s *InstanceNetworkInterfaceAttachment) SetAttachTime(v time.Time) *InstanceNetworkInterfaceAttachment {
    +	s.AttachTime = &v
    +	return s
    +}
    +
    +// SetAttachmentId sets the AttachmentId field's value.
    +func (s *InstanceNetworkInterfaceAttachment) SetAttachmentId(v string) *InstanceNetworkInterfaceAttachment {
    +	s.AttachmentId = &v
    +	return s
    +}
    +
    +// SetDeleteOnTermination sets the DeleteOnTermination field's value.
    +func (s *InstanceNetworkInterfaceAttachment) SetDeleteOnTermination(v bool) *InstanceNetworkInterfaceAttachment {
    +	s.DeleteOnTermination = &v
    +	return s
    +}
    +
    +// SetDeviceIndex sets the DeviceIndex field's value.
    +func (s *InstanceNetworkInterfaceAttachment) SetDeviceIndex(v int64) *InstanceNetworkInterfaceAttachment {
    +	s.DeviceIndex = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *InstanceNetworkInterfaceAttachment) SetStatus(v string) *InstanceNetworkInterfaceAttachment {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes a network interface.
    +type InstanceNetworkInterfaceSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether to assign a public IP address to an instance you launch
    +	// in a VPC. The public IP address can only be assigned to a network interface
    +	// for eth0, and can only be assigned to a new network interface, not an existing
    +	// one. You cannot specify more than one network interface in the request. If
    +	// launching into a default subnet, the default value is true.
    +	AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"`
    +
    +	// If set to true, the interface is deleted when the instance is terminated.
    +	// You can specify true only if creating a new network interface when launching
    +	// an instance.
    +	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
    +
    +	// The description of the network interface. Applies only if creating a network
    +	// interface when launching an instance.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The index of the device on the instance for the network interface attachment.
    +	// If you are specifying a network interface in a RunInstances request, you
    +	// must provide the device index.
    +	DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"`
    +
    +	// The IDs of the security groups for the network interface. Applies only if
    +	// creating a network interface when launching an instance.
    +	Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
    +
    +	// The ID of the network interface.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// The private IP address of the network interface. Applies only if creating
    +	// a network interface when launching an instance. You cannot specify this option
    +	// if you're launching more than one instance in a RunInstances request.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// One or more private IP addresses to assign to the network interface. Only
    +	// one private IP address can be designated as primary. You cannot specify this
    +	// option if you're launching more than one instance in a RunInstances request.
    +	PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" queryName:"PrivateIpAddresses" locationNameList:"item" type:"list"`
    +
    +	// The number of secondary private IP addresses. You can't specify this option
    +	// and specify more than one private IP address using the private IP addresses
    +	// option. You cannot specify this option if you're launching more than one
    +	// instance in a RunInstances request.
    +	SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"`
    +
    +	// The ID of the subnet associated with the network interface. Applies only if
    +	// creating a network interface when launching an instance.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceNetworkInterfaceSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceNetworkInterfaceSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *InstanceNetworkInterfaceSpecification) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "InstanceNetworkInterfaceSpecification"}
    +	if s.PrivateIpAddresses != nil {
    +		for i, v := range s.PrivateIpAddresses {
    +			if v == nil {
    +				continue
    +			}
    +			if err := v.Validate(); err != nil {
    +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams))
    +			}
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *InstanceNetworkInterfaceSpecification {
    +	s.AssociatePublicIpAddress = &v
    +	return s
    +}
    +
    +// SetDeleteOnTermination sets the DeleteOnTermination field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetDeleteOnTermination(v bool) *InstanceNetworkInterfaceSpecification {
    +	s.DeleteOnTermination = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetDescription(v string) *InstanceNetworkInterfaceSpecification {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDeviceIndex sets the DeviceIndex field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetDeviceIndex(v int64) *InstanceNetworkInterfaceSpecification {
    +	s.DeviceIndex = &v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetGroups(v []*string) *InstanceNetworkInterfaceSpecification {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) *InstanceNetworkInterfaceSpecification {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetPrivateIpAddress(v string) *InstanceNetworkInterfaceSpecification {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetPrivateIpAddresses(v []*PrivateIpAddressSpecification) *InstanceNetworkInterfaceSpecification {
    +	s.PrivateIpAddresses = v
    +	return s
    +}
    +
    +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetSecondaryPrivateIpAddressCount(v int64) *InstanceNetworkInterfaceSpecification {
    +	s.SecondaryPrivateIpAddressCount = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *InstanceNetworkInterfaceSpecification) SetSubnetId(v string) *InstanceNetworkInterfaceSpecification {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// Describes a private IP address.
    +type InstancePrivateIpAddress struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The association information for an Elastic IP address for the network interface.
    +	Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"`
    +
    +	// Indicates whether this IP address is the primary private IP address of the
    +	// network interface.
    +	Primary *bool `locationName:"primary" type:"boolean"`
    +
    +	// The private DNS name.
    +	PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
    +
    +	// The private IP address of the network interface.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s InstancePrivateIpAddress) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstancePrivateIpAddress) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociation sets the Association field's value.
    +func (s *InstancePrivateIpAddress) SetAssociation(v *InstanceNetworkInterfaceAssociation) *InstancePrivateIpAddress {
    +	s.Association = v
    +	return s
    +}
    +
    +// SetPrimary sets the Primary field's value.
    +func (s *InstancePrivateIpAddress) SetPrimary(v bool) *InstancePrivateIpAddress {
    +	s.Primary = &v
    +	return s
    +}
    +
    +// SetPrivateDnsName sets the PrivateDnsName field's value.
    +func (s *InstancePrivateIpAddress) SetPrivateDnsName(v string) *InstancePrivateIpAddress {
    +	s.PrivateDnsName = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *InstancePrivateIpAddress) SetPrivateIpAddress(v string) *InstancePrivateIpAddress {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// Describes the current state of the instance.
    +type InstanceState struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The low byte represents the state. The high byte is an opaque internal value
    +	// and should be ignored.
    +	//
    +	//    * 0 : pending
    +	//
    +	//    * 16 : running
    +	//
    +	//    * 32 : shutting-down
    +	//
    +	//    * 48 : terminated
    +	//
    +	//    * 64 : stopping
    +	//
    +	//    * 80 : stopped
    +	Code *int64 `locationName:"code" type:"integer"`
    +
    +	// The current state of the instance.
    +	Name *string `locationName:"name" type:"string" enum:"InstanceStateName"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceState) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceState) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *InstanceState) SetCode(v int64) *InstanceState {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *InstanceState) SetName(v string) *InstanceState {
    +	s.Name = &v
    +	return s
    +}
    +
    +// Describes an instance state change.
    +type InstanceStateChange struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The current state of the instance.
    +	CurrentState *InstanceState `locationName:"currentState" type:"structure"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The previous state of the instance.
    +	PreviousState *InstanceState `locationName:"previousState" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceStateChange) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceStateChange) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCurrentState sets the CurrentState field's value.
    +func (s *InstanceStateChange) SetCurrentState(v *InstanceState) *InstanceStateChange {
    +	s.CurrentState = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *InstanceStateChange) SetInstanceId(v string) *InstanceStateChange {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetPreviousState sets the PreviousState field's value.
    +func (s *InstanceStateChange) SetPreviousState(v *InstanceState) *InstanceStateChange {
    +	s.PreviousState = v
    +	return s
    +}
    +
    +// Describes the status of an instance.
    +type InstanceStatus struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone of the instance.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// Any scheduled events associated with the instance.
    +	Events []*InstanceStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The intended state of the instance. DescribeInstanceStatus requires that
    +	// an instance be in the running state.
    +	InstanceState *InstanceState `locationName:"instanceState" type:"structure"`
    +
    +	// Reports impaired functionality that stems from issues internal to the instance,
    +	// such as impaired reachability.
    +	InstanceStatus *InstanceStatusSummary `locationName:"instanceStatus" type:"structure"`
    +
    +	// Reports impaired functionality that stems from issues related to the systems
    +	// that support an instance, such as hardware failures and network connectivity
    +	// problems.
    +	SystemStatus *InstanceStatusSummary `locationName:"systemStatus" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceStatus) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceStatus) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *InstanceStatus) SetAvailabilityZone(v string) *InstanceStatus {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetEvents sets the Events field's value.
    +func (s *InstanceStatus) SetEvents(v []*InstanceStatusEvent) *InstanceStatus {
    +	s.Events = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *InstanceStatus) SetInstanceId(v string) *InstanceStatus {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceState sets the InstanceState field's value.
    +func (s *InstanceStatus) SetInstanceState(v *InstanceState) *InstanceStatus {
    +	s.InstanceState = v
    +	return s
    +}
    +
    +// SetInstanceStatus sets the InstanceStatus field's value.
    +func (s *InstanceStatus) SetInstanceStatus(v *InstanceStatusSummary) *InstanceStatus {
    +	s.InstanceStatus = v
    +	return s
    +}
    +
    +// SetSystemStatus sets the SystemStatus field's value.
    +func (s *InstanceStatus) SetSystemStatus(v *InstanceStatusSummary) *InstanceStatus {
    +	s.SystemStatus = v
    +	return s
    +}
    +
    +// Describes the instance status.
    +type InstanceStatusDetails struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The time when a status check failed. For an instance that was launched and
    +	// impaired, this is the time when the instance was launched.
    +	ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The type of instance status.
    +	Name *string `locationName:"name" type:"string" enum:"StatusName"`
    +
    +	// The status.
    +	Status *string `locationName:"status" type:"string" enum:"StatusType"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceStatusDetails) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceStatusDetails) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImpairedSince sets the ImpairedSince field's value.
    +func (s *InstanceStatusDetails) SetImpairedSince(v time.Time) *InstanceStatusDetails {
    +	s.ImpairedSince = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *InstanceStatusDetails) SetName(v string) *InstanceStatusDetails {
    +	s.Name = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *InstanceStatusDetails) SetStatus(v string) *InstanceStatusDetails {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes a scheduled event for an instance.
    +type InstanceStatusEvent struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The event code.
    +	Code *string `locationName:"code" type:"string" enum:"EventCode"`
    +
    +	// A description of the event.
    +	//
    +	// After a scheduled event is completed, it can still be described for up to
    +	// a week. If the event has been completed, this description starts with the
    +	// following text: [Completed].
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The latest scheduled end time for the event.
    +	NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The earliest scheduled start time for the event.
    +	NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceStatusEvent) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceStatusEvent) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *InstanceStatusEvent) SetCode(v string) *InstanceStatusEvent {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *InstanceStatusEvent) SetDescription(v string) *InstanceStatusEvent {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetNotAfter sets the NotAfter field's value.
    +func (s *InstanceStatusEvent) SetNotAfter(v time.Time) *InstanceStatusEvent {
    +	s.NotAfter = &v
    +	return s
    +}
    +
    +// SetNotBefore sets the NotBefore field's value.
    +func (s *InstanceStatusEvent) SetNotBefore(v time.Time) *InstanceStatusEvent {
    +	s.NotBefore = &v
    +	return s
    +}
    +
    +// Describes the status of an instance.
    +type InstanceStatusSummary struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The system instance health or application instance health.
    +	Details []*InstanceStatusDetails `locationName:"details" locationNameList:"item" type:"list"`
    +
    +	// The status.
    +	Status *string `locationName:"status" type:"string" enum:"SummaryStatus"`
    +}
    +
    +// String returns the string representation
    +func (s InstanceStatusSummary) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InstanceStatusSummary) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDetails sets the Details field's value.
    +func (s *InstanceStatusSummary) SetDetails(v []*InstanceStatusDetails) *InstanceStatusSummary {
    +	s.Details = v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes an Internet gateway.
    +type InternetGateway struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Any VPCs attached to the Internet gateway.
    +	Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the Internet gateway.
    +	InternetGatewayId *string `locationName:"internetGatewayId" type:"string"`
    +
    +	// Any tags assigned to the Internet gateway.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s InternetGateway) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InternetGateway) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachments sets the Attachments field's value.
    +func (s *InternetGateway) SetAttachments(v []*InternetGatewayAttachment) *InternetGateway {
    +	s.Attachments = v
    +	return s
    +}
    +
    +// SetInternetGatewayId sets the InternetGatewayId field's value.
    +func (s *InternetGateway) SetInternetGatewayId(v string) *InternetGateway {
    +	s.InternetGatewayId = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *InternetGateway) SetTags(v []*Tag) *InternetGateway {
    +	s.Tags = v
    +	return s
    +}
    +
    +// Describes the attachment of a VPC to an Internet gateway.
    +type InternetGatewayAttachment struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The current state of the attachment.
    +	State *string `locationName:"state" type:"string" enum:"AttachmentStatus"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s InternetGatewayAttachment) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s InternetGatewayAttachment) GoString() string {
    +	return s.String()
    +}
    +
    +// SetState sets the State field's value.
    +func (s *InternetGatewayAttachment) SetState(v string) *InternetGatewayAttachment {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *InternetGatewayAttachment) SetVpcId(v string) *InternetGatewayAttachment {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a security group rule.
    +type IpPermission struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The start of port range for the TCP and UDP protocols, or an ICMP type number.
    +	// A value of -1 indicates all ICMP types.
    +	FromPort *int64 `locationName:"fromPort" type:"integer"`
    +
    +	// The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers
    +	// (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
    +	//
    +	// [EC2-VPC only] When you authorize or revoke security group rules, you can
    +	// use -1 to specify all.
    +	IpProtocol *string `locationName:"ipProtocol" type:"string"`
    +
    +	// One or more IP ranges.
    +	IpRanges []*IpRange `locationName:"ipRanges" locationNameList:"item" type:"list"`
    +
    +	// (Valid for AuthorizeSecurityGroupEgress, RevokeSecurityGroupEgress and DescribeSecurityGroups
    +	// only) One or more prefix list IDs for an AWS service. In an AuthorizeSecurityGroupEgress
    +	// request, this is the AWS service that you want to access through a VPC endpoint
    +	// from instances associated with the security group.
    +	PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"`
    +
    +	// The end of port range for the TCP and UDP protocols, or an ICMP code. A value
    +	// of -1 indicates all ICMP codes for the specified ICMP type.
    +	ToPort *int64 `locationName:"toPort" type:"integer"`
    +
    +	// One or more security group and AWS account ID pairs.
    +	UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s IpPermission) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s IpPermission) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFromPort sets the FromPort field's value.
    +func (s *IpPermission) SetFromPort(v int64) *IpPermission {
    +	s.FromPort = &v
    +	return s
    +}
    +
    +// SetIpProtocol sets the IpProtocol field's value.
    +func (s *IpPermission) SetIpProtocol(v string) *IpPermission {
    +	s.IpProtocol = &v
    +	return s
    +}
    +
    +// SetIpRanges sets the IpRanges field's value.
    +func (s *IpPermission) SetIpRanges(v []*IpRange) *IpPermission {
    +	s.IpRanges = v
    +	return s
    +}
    +
    +// SetPrefixListIds sets the PrefixListIds field's value.
    +func (s *IpPermission) SetPrefixListIds(v []*PrefixListId) *IpPermission {
    +	s.PrefixListIds = v
    +	return s
    +}
    +
    +// SetToPort sets the ToPort field's value.
    +func (s *IpPermission) SetToPort(v int64) *IpPermission {
    +	s.ToPort = &v
    +	return s
    +}
    +
    +// SetUserIdGroupPairs sets the UserIdGroupPairs field's value.
    +func (s *IpPermission) SetUserIdGroupPairs(v []*UserIdGroupPair) *IpPermission {
    +	s.UserIdGroupPairs = v
    +	return s
    +}
    +
    +// Describes an IP range.
    +type IpRange struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR range. You can either specify a CIDR range or a source security
    +	// group, not both.
    +	CidrIp *string `locationName:"cidrIp" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s IpRange) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s IpRange) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCidrIp sets the CidrIp field's value.
    +func (s *IpRange) SetCidrIp(v string) *IpRange {
    +	s.CidrIp = &v
    +	return s
    +}
    +
    +// Describes a key pair.
    +type KeyPairInfo struct {
    +	_ struct{} `type:"structure"`
    +
    +	// If you used CreateKeyPair to create the key pair, this is the SHA-1 digest
    +	// of the DER encoded private key. If you used ImportKeyPair to provide AWS
    +	// the public key, this is the MD5 public key fingerprint as specified in section
    +	// 4 of RFC4716.
    +	KeyFingerprint *string `locationName:"keyFingerprint" type:"string"`
    +
    +	// The name of the key pair.
    +	KeyName *string `locationName:"keyName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s KeyPairInfo) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s KeyPairInfo) GoString() string {
    +	return s.String()
    +}
    +
    +// SetKeyFingerprint sets the KeyFingerprint field's value.
    +func (s *KeyPairInfo) SetKeyFingerprint(v string) *KeyPairInfo {
    +	s.KeyFingerprint = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *KeyPairInfo) SetKeyName(v string) *KeyPairInfo {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// Describes a launch permission.
    +type LaunchPermission struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of the group.
    +	Group *string `locationName:"group" type:"string" enum:"PermissionGroup"`
    +
    +	// The AWS account ID.
    +	UserId *string `locationName:"userId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s LaunchPermission) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s LaunchPermission) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGroup sets the Group field's value.
    +func (s *LaunchPermission) SetGroup(v string) *LaunchPermission {
    +	s.Group = &v
    +	return s
    +}
    +
    +// SetUserId sets the UserId field's value.
    +func (s *LaunchPermission) SetUserId(v string) *LaunchPermission {
    +	s.UserId = &v
    +	return s
    +}
    +
    +// Describes a launch permission modification.
    +type LaunchPermissionModifications struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The AWS account ID to add to the list of launch permissions for the AMI.
    +	Add []*LaunchPermission `locationNameList:"item" type:"list"`
    +
    +	// The AWS account ID to remove from the list of launch permissions for the
    +	// AMI.
    +	Remove []*LaunchPermission `locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s LaunchPermissionModifications) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s LaunchPermissionModifications) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAdd sets the Add field's value.
    +func (s *LaunchPermissionModifications) SetAdd(v []*LaunchPermission) *LaunchPermissionModifications {
    +	s.Add = v
    +	return s
    +}
    +
    +// SetRemove sets the Remove field's value.
    +func (s *LaunchPermissionModifications) SetRemove(v []*LaunchPermission) *LaunchPermissionModifications {
    +	s.Remove = v
    +	return s
    +}
    +
    +// Describes the launch specification for an instance.
    +type LaunchSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Deprecated.
    +	AddressingType *string `locationName:"addressingType" type:"string"`
    +
    +	// One or more block device mapping entries.
    +	//
    +	// Although you can specify encrypted EBS volumes in this block device mapping
    +	// for your Spot Instances, these volumes are not encrypted.
    +	BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
    +
    +	// Indicates whether the instance is optimized for EBS I/O. This optimization
    +	// provides dedicated throughput to Amazon EBS and an optimized configuration
    +	// stack to provide optimal EBS I/O performance. This optimization isn't available
    +	// with all instance types. Additional usage charges apply when using an EBS
    +	// Optimized instance.
    +	//
    +	// Default: false
    +	EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
    +
    +	// The IAM instance profile.
    +	IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
    +
    +	// The ID of the AMI.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +
    +	// The instance type.
    +	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
    +
    +	// The ID of the kernel.
    +	KernelId *string `locationName:"kernelId" type:"string"`
    +
    +	// The name of the key pair.
    +	KeyName *string `locationName:"keyName" type:"string"`
    +
    +	// Describes the monitoring for the instance.
    +	Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"`
    +
    +	// One or more network interfaces.
    +	NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
    +
    +	// The placement information for the instance.
    +	Placement *SpotPlacement `locationName:"placement" type:"structure"`
    +
    +	// The ID of the RAM disk.
    +	RamdiskId *string `locationName:"ramdiskId" type:"string"`
    +
    +	// One or more security groups. When requesting instances in a VPC, you must
    +	// specify the IDs of the security groups. When requesting instances in EC2-Classic,
    +	// you can specify the names or the IDs of the security groups.
    +	SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the subnet in which to launch the instance.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +
    +	// The user data to make available to the instances. If you are using an AWS
    +	// SDK or command line tool, Base64-encoding is performed for you, and you can
    +	// load the text from a file. Otherwise, you must provide Base64-encoded text.
    +	UserData *string `locationName:"userData" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s LaunchSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s LaunchSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAddressingType sets the AddressingType field's value.
    +func (s *LaunchSpecification) SetAddressingType(v string) *LaunchSpecification {
    +	s.AddressingType = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *LaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *LaunchSpecification {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *LaunchSpecification) SetEbsOptimized(v bool) *LaunchSpecification {
    +	s.EbsOptimized = &v
    +	return s
    +}
    +
    +// SetIamInstanceProfile sets the IamInstanceProfile field's value.
    +func (s *LaunchSpecification) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *LaunchSpecification {
    +	s.IamInstanceProfile = v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *LaunchSpecification) SetImageId(v string) *LaunchSpecification {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *LaunchSpecification) SetInstanceType(v string) *LaunchSpecification {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *LaunchSpecification) SetKernelId(v string) *LaunchSpecification {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *LaunchSpecification) SetKeyName(v string) *LaunchSpecification {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// SetMonitoring sets the Monitoring field's value.
    +func (s *LaunchSpecification) SetMonitoring(v *RunInstancesMonitoringEnabled) *LaunchSpecification {
    +	s.Monitoring = v
    +	return s
    +}
    +
    +// SetNetworkInterfaces sets the NetworkInterfaces field's value.
    +func (s *LaunchSpecification) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *LaunchSpecification {
    +	s.NetworkInterfaces = v
    +	return s
    +}
    +
    +// SetPlacement sets the Placement field's value.
    +func (s *LaunchSpecification) SetPlacement(v *SpotPlacement) *LaunchSpecification {
    +	s.Placement = v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *LaunchSpecification) SetRamdiskId(v string) *LaunchSpecification {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetSecurityGroups sets the SecurityGroups field's value.
    +func (s *LaunchSpecification) SetSecurityGroups(v []*GroupIdentifier) *LaunchSpecification {
    +	s.SecurityGroups = v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *LaunchSpecification) SetSubnetId(v string) *LaunchSpecification {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetUserData sets the UserData field's value.
    +func (s *LaunchSpecification) SetUserData(v string) *LaunchSpecification {
    +	s.UserData = &v
    +	return s
    +}
    +
    +// Contains the parameters for ModifyHosts.
    +type ModifyHostsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Specify whether to enable or disable auto-placement.
    +	//
    +	// AutoPlacement is a required field
    +	AutoPlacement *string `locationName:"autoPlacement" type:"string" required:"true" enum:"AutoPlacement"`
    +
    +	// The host IDs of the Dedicated Hosts you want to modify.
    +	//
    +	// HostIds is a required field
    +	HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyHostsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyHostsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyHostsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyHostsInput"}
    +	if s.AutoPlacement == nil {
    +		invalidParams.Add(request.NewErrParamRequired("AutoPlacement"))
    +	}
    +	if s.HostIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("HostIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAutoPlacement sets the AutoPlacement field's value.
    +func (s *ModifyHostsInput) SetAutoPlacement(v string) *ModifyHostsInput {
    +	s.AutoPlacement = &v
    +	return s
    +}
    +
    +// SetHostIds sets the HostIds field's value.
    +func (s *ModifyHostsInput) SetHostIds(v []*string) *ModifyHostsInput {
    +	s.HostIds = v
    +	return s
    +}
    +
    +// Contains the output of ModifyHosts.
    +type ModifyHostsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The IDs of the Dedicated Hosts that were successfully modified.
    +	Successful []*string `locationName:"successful" locationNameList:"item" type:"list"`
    +
    +	// The IDs of the Dedicated Hosts that could not be modified. Check whether
    +	// the setting you requested can be used.
    +	Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyHostsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyHostsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSuccessful sets the Successful field's value.
    +func (s *ModifyHostsOutput) SetSuccessful(v []*string) *ModifyHostsOutput {
    +	s.Successful = v
    +	return s
    +}
    +
    +// SetUnsuccessful sets the Unsuccessful field's value.
    +func (s *ModifyHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ModifyHostsOutput {
    +	s.Unsuccessful = v
    +	return s
    +}
    +
    +// Contains the parameters of ModifyIdFormat.
    +type ModifyIdFormatInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The type of resource: instance | reservation | snapshot | volume
    +	//
    +	// Resource is a required field
    +	Resource *string `type:"string" required:"true"`
    +
    +	// Indicates whether the resource should use longer IDs (17-character IDs).
    +	//
    +	// UseLongIds is a required field
    +	UseLongIds *bool `type:"boolean" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyIdFormatInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyIdFormatInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyIdFormatInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyIdFormatInput"}
    +	if s.Resource == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Resource"))
    +	}
    +	if s.UseLongIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("UseLongIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetResource sets the Resource field's value.
    +func (s *ModifyIdFormatInput) SetResource(v string) *ModifyIdFormatInput {
    +	s.Resource = &v
    +	return s
    +}
    +
    +// SetUseLongIds sets the UseLongIds field's value.
    +func (s *ModifyIdFormatInput) SetUseLongIds(v bool) *ModifyIdFormatInput {
    +	s.UseLongIds = &v
    +	return s
    +}
    +
    +type ModifyIdFormatOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyIdFormatOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyIdFormatOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters of ModifyIdentityIdFormat.
    +type ModifyIdentityIdFormatInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ARN of the principal, which can be an IAM user, IAM role, or the root
    +	// user. Specify all to modify the ID format for all IAM users, IAM roles, and
    +	// the root user of the account.
    +	//
    +	// PrincipalArn is a required field
    +	PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"`
    +
    +	// The type of resource: instance | reservation | snapshot | volume
    +	//
    +	// Resource is a required field
    +	Resource *string `locationName:"resource" type:"string" required:"true"`
    +
    +	// Indicates whether the resource should use longer IDs (17-character IDs)
    +	//
    +	// UseLongIds is a required field
    +	UseLongIds *bool `locationName:"useLongIds" type:"boolean" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyIdentityIdFormatInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyIdentityIdFormatInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyIdentityIdFormatInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyIdentityIdFormatInput"}
    +	if s.PrincipalArn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
    +	}
    +	if s.Resource == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Resource"))
    +	}
    +	if s.UseLongIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("UseLongIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetPrincipalArn sets the PrincipalArn field's value.
    +func (s *ModifyIdentityIdFormatInput) SetPrincipalArn(v string) *ModifyIdentityIdFormatInput {
    +	s.PrincipalArn = &v
    +	return s
    +}
    +
    +// SetResource sets the Resource field's value.
    +func (s *ModifyIdentityIdFormatInput) SetResource(v string) *ModifyIdentityIdFormatInput {
    +	s.Resource = &v
    +	return s
    +}
    +
    +// SetUseLongIds sets the UseLongIds field's value.
    +func (s *ModifyIdentityIdFormatInput) SetUseLongIds(v bool) *ModifyIdentityIdFormatInput {
    +	s.UseLongIds = &v
    +	return s
    +}
    +
    +type ModifyIdentityIdFormatOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyIdentityIdFormatOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyIdentityIdFormatOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifyImageAttribute.
    +type ModifyImageAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of the attribute to modify.
    +	Attribute *string `type:"string"`
    +
    +	// A description for the AMI.
    +	Description *AttributeValue `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the AMI.
    +	//
    +	// ImageId is a required field
    +	ImageId *string `type:"string" required:"true"`
    +
    +	// A launch permission modification.
    +	LaunchPermission *LaunchPermissionModifications `type:"structure"`
    +
    +	// The operation type.
    +	OperationType *string `type:"string" enum:"OperationType"`
    +
    +	// One or more product codes. After you add a product code to an AMI, it can't
    +	// be removed. This is only valid when modifying the productCodes attribute.
    +	ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"`
    +
    +	// One or more user groups. This is only valid when modifying the launchPermission
    +	// attribute.
    +	UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"`
    +
    +	// One or more AWS account IDs. This is only valid when modifying the launchPermission
    +	// attribute.
    +	UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"`
    +
    +	// The value of the attribute being modified. This is only valid when modifying
    +	// the description attribute.
    +	Value *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyImageAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyImageAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyImageAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyImageAttributeInput"}
    +	if s.ImageId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ImageId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *ModifyImageAttributeInput) SetAttribute(v string) *ModifyImageAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *ModifyImageAttributeInput) SetDescription(v *AttributeValue) *ModifyImageAttributeInput {
    +	s.Description = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ModifyImageAttributeInput) SetDryRun(v bool) *ModifyImageAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *ModifyImageAttributeInput) SetImageId(v string) *ModifyImageAttributeInput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetLaunchPermission sets the LaunchPermission field's value.
    +func (s *ModifyImageAttributeInput) SetLaunchPermission(v *LaunchPermissionModifications) *ModifyImageAttributeInput {
    +	s.LaunchPermission = v
    +	return s
    +}
    +
    +// SetOperationType sets the OperationType field's value.
    +func (s *ModifyImageAttributeInput) SetOperationType(v string) *ModifyImageAttributeInput {
    +	s.OperationType = &v
    +	return s
    +}
    +
    +// SetProductCodes sets the ProductCodes field's value.
    +func (s *ModifyImageAttributeInput) SetProductCodes(v []*string) *ModifyImageAttributeInput {
    +	s.ProductCodes = v
    +	return s
    +}
    +
    +// SetUserGroups sets the UserGroups field's value.
    +func (s *ModifyImageAttributeInput) SetUserGroups(v []*string) *ModifyImageAttributeInput {
    +	s.UserGroups = v
    +	return s
    +}
    +
    +// SetUserIds sets the UserIds field's value.
    +func (s *ModifyImageAttributeInput) SetUserIds(v []*string) *ModifyImageAttributeInput {
    +	s.UserIds = v
    +	return s
    +}
    +
    +// SetValue sets the Value field's value.
    +func (s *ModifyImageAttributeInput) SetValue(v string) *ModifyImageAttributeInput {
    +	s.Value = &v
    +	return s
    +}
    +
    +type ModifyImageAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyImageAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyImageAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifyInstanceAttribute.
    +type ModifyInstanceAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of the attribute.
    +	Attribute *string `locationName:"attribute" type:"string" enum:"InstanceAttributeName"`
    +
    +	// Modifies the DeleteOnTermination attribute for volumes that are currently
    +	// attached. The volume must be owned by the caller. If no value is specified
    +	// for DeleteOnTermination, the default is true and the volume is deleted when
    +	// the instance is terminated.
    +	//
    +	// To add instance store volumes to an Amazon EBS-backed instance, you must
    +	// add them when you launch the instance. For more information, see Updating
    +	// the Block Device Mapping when Launching an Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
    +
    +	// If the value is true, you can't terminate the instance using the Amazon EC2
    +	// console, CLI, or API; otherwise, you can. You cannot use this parameter for
    +	// Spot Instances.
    +	DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Specifies whether the instance is optimized for EBS I/O. This optimization
    +	// provides dedicated throughput to Amazon EBS and an optimized configuration
    +	// stack to provide optimal EBS I/O performance. This optimization isn't available
    +	// with all instance types. Additional usage charges apply when using an EBS
    +	// Optimized instance.
    +	EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"`
    +
    +	// Set to true to enable enhanced networking with ENA for the instance.
    +	//
    +	// This option is supported only for HVM instances. Specifying this option with
    +	// a PV instance can make it unreachable.
    +	EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"`
    +
    +	// [EC2-VPC] Changes the security groups of the instance. You must specify at
    +	// least one security group, even if it's just the default security group for
    +	// the VPC. You must specify the security group ID, not the security group name.
    +	Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +
    +	// Specifies whether an instance stops or terminates when you initiate shutdown
    +	// from the instance (using the operating system command for system shutdown).
    +	InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"`
    +
    +	// Changes the instance type to the specified value. For more information, see
    +	// Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
    +	// If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.
    +	InstanceType *AttributeValue `locationName:"instanceType" type:"structure"`
    +
    +	// Changes the instance's kernel to the specified value. We recommend that you
    +	// use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB
    +	// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html).
    +	Kernel *AttributeValue `locationName:"kernel" type:"structure"`
    +
    +	// Changes the instance's RAM disk to the specified value. We recommend that
    +	// you use PV-GRUB instead of kernels and RAM disks. For more information, see
    +	// PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html).
    +	Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"`
    +
    +	// Specifies whether source/destination checking is enabled. A value of true
    +	// means that checking is enabled, and false means checking is disabled. This
    +	// value must be false for a NAT instance to perform NAT.
    +	SourceDestCheck *AttributeBooleanValue `type:"structure"`
    +
    +	// Set to simple to enable enhanced networking with the Intel 82599 Virtual
    +	// Function interface for the instance.
    +	//
    +	// There is no way to disable enhanced networking with the Intel 82599 Virtual
    +	// Function interface at this time.
    +	//
    +	// This option is supported only for HVM instances. Specifying this option with
    +	// a PV instance can make it unreachable.
    +	SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"`
    +
    +	// Changes the instance's user data to the specified value. If you are using
    +	// an AWS SDK or command line tool, Base64-encoding is performed for you, and
    +	// you can load the text from a file. Otherwise, you must provide Base64-encoded
    +	// text.
    +	UserData *BlobAttributeValue `locationName:"userData" type:"structure"`
    +
    +	// A new value for the attribute. Use only with the kernel, ramdisk, userData,
    +	// disableApiTermination, or instanceInitiatedShutdownBehavior attribute.
    +	Value *string `locationName:"value" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyInstanceAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyInstanceAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyInstanceAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceAttributeInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *ModifyInstanceAttributeInput) SetAttribute(v string) *ModifyInstanceAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *ModifyInstanceAttributeInput) SetBlockDeviceMappings(v []*InstanceBlockDeviceMappingSpecification) *ModifyInstanceAttributeInput {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetDisableApiTermination sets the DisableApiTermination field's value.
    +func (s *ModifyInstanceAttributeInput) SetDisableApiTermination(v *AttributeBooleanValue) *ModifyInstanceAttributeInput {
    +	s.DisableApiTermination = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ModifyInstanceAttributeInput) SetDryRun(v bool) *ModifyInstanceAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *ModifyInstanceAttributeInput) SetEbsOptimized(v *AttributeBooleanValue) *ModifyInstanceAttributeInput {
    +	s.EbsOptimized = v
    +	return s
    +}
    +
    +// SetEnaSupport sets the EnaSupport field's value.
    +func (s *ModifyInstanceAttributeInput) SetEnaSupport(v *AttributeBooleanValue) *ModifyInstanceAttributeInput {
    +	s.EnaSupport = v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *ModifyInstanceAttributeInput) SetGroups(v []*string) *ModifyInstanceAttributeInput {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *ModifyInstanceAttributeInput) SetInstanceId(v string) *ModifyInstanceAttributeInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value.
    +func (s *ModifyInstanceAttributeInput) SetInstanceInitiatedShutdownBehavior(v *AttributeValue) *ModifyInstanceAttributeInput {
    +	s.InstanceInitiatedShutdownBehavior = v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ModifyInstanceAttributeInput) SetInstanceType(v *AttributeValue) *ModifyInstanceAttributeInput {
    +	s.InstanceType = v
    +	return s
    +}
    +
    +// SetKernel sets the Kernel field's value.
    +func (s *ModifyInstanceAttributeInput) SetKernel(v *AttributeValue) *ModifyInstanceAttributeInput {
    +	s.Kernel = v
    +	return s
    +}
    +
    +// SetRamdisk sets the Ramdisk field's value.
    +func (s *ModifyInstanceAttributeInput) SetRamdisk(v *AttributeValue) *ModifyInstanceAttributeInput {
    +	s.Ramdisk = v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *ModifyInstanceAttributeInput) SetSourceDestCheck(v *AttributeBooleanValue) *ModifyInstanceAttributeInput {
    +	s.SourceDestCheck = v
    +	return s
    +}
    +
    +// SetSriovNetSupport sets the SriovNetSupport field's value.
    +func (s *ModifyInstanceAttributeInput) SetSriovNetSupport(v *AttributeValue) *ModifyInstanceAttributeInput {
    +	s.SriovNetSupport = v
    +	return s
    +}
    +
    +// SetUserData sets the UserData field's value.
    +func (s *ModifyInstanceAttributeInput) SetUserData(v *BlobAttributeValue) *ModifyInstanceAttributeInput {
    +	s.UserData = v
    +	return s
    +}
    +
    +// SetValue sets the Value field's value.
    +func (s *ModifyInstanceAttributeInput) SetValue(v string) *ModifyInstanceAttributeInput {
    +	s.Value = &v
    +	return s
    +}
    +
    +type ModifyInstanceAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyInstanceAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyInstanceAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifyInstancePlacement.
    +type ModifyInstancePlacementInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The new affinity setting for the instance.
    +	Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"`
    +
    +	// The ID of the Dedicated Host that the instance will have affinity with.
    +	HostId *string `locationName:"hostId" type:"string"`
    +
    +	// The ID of the instance that you are modifying.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +
    +	// The tenancy of the instance that you are modifying.
    +	Tenancy *string `locationName:"tenancy" type:"string" enum:"HostTenancy"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyInstancePlacementInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyInstancePlacementInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyInstancePlacementInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyInstancePlacementInput"}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAffinity sets the Affinity field's value.
    +func (s *ModifyInstancePlacementInput) SetAffinity(v string) *ModifyInstancePlacementInput {
    +	s.Affinity = &v
    +	return s
    +}
    +
    +// SetHostId sets the HostId field's value.
    +func (s *ModifyInstancePlacementInput) SetHostId(v string) *ModifyInstancePlacementInput {
    +	s.HostId = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *ModifyInstancePlacementInput) SetInstanceId(v string) *ModifyInstancePlacementInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetTenancy sets the Tenancy field's value.
    +func (s *ModifyInstancePlacementInput) SetTenancy(v string) *ModifyInstancePlacementInput {
    +	s.Tenancy = &v
    +	return s
    +}
    +
    +// Contains the output of ModifyInstancePlacement.
    +type ModifyInstancePlacementOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Is true if the request succeeds, and an error otherwise.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyInstancePlacementOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyInstancePlacementOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *ModifyInstancePlacementOutput) SetReturn(v bool) *ModifyInstancePlacementOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for ModifyNetworkInterfaceAttribute.
    +type ModifyNetworkInterfaceAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the interface attachment. If modifying the 'delete on termination'
    +	// attribute, you must specify the ID of the interface attachment.
    +	Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"`
    +
    +	// A description for the network interface.
    +	Description *AttributeValue `locationName:"description" type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Changes the security groups for the network interface. The new set of groups
    +	// you specify replaces the current set. You must specify at least one group,
    +	// even if it's just the default security group in the VPC. You must specify
    +	// the ID of the security group, not the name.
    +	Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
    +
    +	// The ID of the network interface.
    +	//
    +	// NetworkInterfaceId is a required field
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
    +
    +	// Indicates whether source/destination checking is enabled. A value of true
    +	// means checking is enabled, and false means checking is disabled. This value
    +	// must be false for a NAT instance to perform NAT. For more information, see
    +	// NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html)
    +	// in the Amazon Virtual Private Cloud User Guide.
    +	SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyNetworkInterfaceAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyNetworkInterfaceAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyNetworkInterfaceAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyNetworkInterfaceAttributeInput"}
    +	if s.NetworkInterfaceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttachment sets the Attachment field's value.
    +func (s *ModifyNetworkInterfaceAttributeInput) SetAttachment(v *NetworkInterfaceAttachmentChanges) *ModifyNetworkInterfaceAttributeInput {
    +	s.Attachment = v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *ModifyNetworkInterfaceAttributeInput) SetDescription(v *AttributeValue) *ModifyNetworkInterfaceAttributeInput {
    +	s.Description = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ModifyNetworkInterfaceAttributeInput) SetDryRun(v bool) *ModifyNetworkInterfaceAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *ModifyNetworkInterfaceAttributeInput) SetGroups(v []*string) *ModifyNetworkInterfaceAttributeInput {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *ModifyNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) *ModifyNetworkInterfaceAttributeInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *ModifyNetworkInterfaceAttributeInput) SetSourceDestCheck(v *AttributeBooleanValue) *ModifyNetworkInterfaceAttributeInput {
    +	s.SourceDestCheck = v
    +	return s
    +}
    +
    +type ModifyNetworkInterfaceAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyNetworkInterfaceAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyNetworkInterfaceAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifyReservedInstances.
    +type ModifyReservedInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A unique, case-sensitive token you provide to ensure idempotency of your
    +	// modification request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// The IDs of the Reserved Instances to modify.
    +	//
    +	// ReservedInstancesIds is a required field
    +	ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list" required:"true"`
    +
    +	// The configuration settings for the Reserved Instances to modify.
    +	//
    +	// TargetConfigurations is a required field
    +	TargetConfigurations []*ReservedInstancesConfiguration `locationName:"ReservedInstancesConfigurationSetItemType" locationNameList:"item" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyReservedInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyReservedInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyReservedInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyReservedInstancesInput"}
    +	if s.ReservedInstancesIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ReservedInstancesIds"))
    +	}
    +	if s.TargetConfigurations == nil {
    +		invalidParams.Add(request.NewErrParamRequired("TargetConfigurations"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *ModifyReservedInstancesInput) SetClientToken(v string) *ModifyReservedInstancesInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetReservedInstancesIds sets the ReservedInstancesIds field's value.
    +func (s *ModifyReservedInstancesInput) SetReservedInstancesIds(v []*string) *ModifyReservedInstancesInput {
    +	s.ReservedInstancesIds = v
    +	return s
    +}
    +
    +// SetTargetConfigurations sets the TargetConfigurations field's value.
    +func (s *ModifyReservedInstancesInput) SetTargetConfigurations(v []*ReservedInstancesConfiguration) *ModifyReservedInstancesInput {
    +	s.TargetConfigurations = v
    +	return s
    +}
    +
    +// Contains the output of ModifyReservedInstances.
    +type ModifyReservedInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID for the modification.
    +	ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyReservedInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyReservedInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservedInstancesModificationId sets the ReservedInstancesModificationId field's value.
    +func (s *ModifyReservedInstancesOutput) SetReservedInstancesModificationId(v string) *ModifyReservedInstancesOutput {
    +	s.ReservedInstancesModificationId = &v
    +	return s
    +}
    +
    +// Contains the parameters for ModifySnapshotAttribute.
    +type ModifySnapshotAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The snapshot attribute to modify.
    +	//
    +	// Only volume creation permissions may be modified at the customer level.
    +	Attribute *string `type:"string" enum:"SnapshotAttributeName"`
    +
    +	// A JSON representation of the snapshot attribute modification.
    +	CreateVolumePermission *CreateVolumePermissionModifications `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The group to modify for the snapshot.
    +	GroupNames []*string `locationName:"UserGroup" locationNameList:"GroupName" type:"list"`
    +
    +	// The type of operation to perform to the attribute.
    +	OperationType *string `type:"string" enum:"OperationType"`
    +
    +	// The ID of the snapshot.
    +	//
    +	// SnapshotId is a required field
    +	SnapshotId *string `type:"string" required:"true"`
    +
    +	// The account ID to modify for the snapshot.
    +	UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s ModifySnapshotAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifySnapshotAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifySnapshotAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifySnapshotAttributeInput"}
    +	if s.SnapshotId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SnapshotId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *ModifySnapshotAttributeInput) SetAttribute(v string) *ModifySnapshotAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetCreateVolumePermission sets the CreateVolumePermission field's value.
    +func (s *ModifySnapshotAttributeInput) SetCreateVolumePermission(v *CreateVolumePermissionModifications) *ModifySnapshotAttributeInput {
    +	s.CreateVolumePermission = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ModifySnapshotAttributeInput) SetDryRun(v bool) *ModifySnapshotAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetGroupNames sets the GroupNames field's value.
    +func (s *ModifySnapshotAttributeInput) SetGroupNames(v []*string) *ModifySnapshotAttributeInput {
    +	s.GroupNames = v
    +	return s
    +}
    +
    +// SetOperationType sets the OperationType field's value.
    +func (s *ModifySnapshotAttributeInput) SetOperationType(v string) *ModifySnapshotAttributeInput {
    +	s.OperationType = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *ModifySnapshotAttributeInput) SetSnapshotId(v string) *ModifySnapshotAttributeInput {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// SetUserIds sets the UserIds field's value.
    +func (s *ModifySnapshotAttributeInput) SetUserIds(v []*string) *ModifySnapshotAttributeInput {
    +	s.UserIds = v
    +	return s
    +}
    +
    +type ModifySnapshotAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifySnapshotAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifySnapshotAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifySpotFleetRequest.
    +type ModifySpotFleetRequestInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether running Spot instances should be terminated if the target
    +	// capacity of the Spot fleet request is decreased below the current size of
    +	// the Spot fleet.
    +	ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +
    +	// The size of the fleet.
    +	TargetCapacity *int64 `locationName:"targetCapacity" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s ModifySpotFleetRequestInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifySpotFleetRequestInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifySpotFleetRequestInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifySpotFleetRequestInput"}
    +	if s.SpotFleetRequestId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value.
    +func (s *ModifySpotFleetRequestInput) SetExcessCapacityTerminationPolicy(v string) *ModifySpotFleetRequestInput {
    +	s.ExcessCapacityTerminationPolicy = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value.
    +func (s *ModifySpotFleetRequestInput) SetSpotFleetRequestId(v string) *ModifySpotFleetRequestInput {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// SetTargetCapacity sets the TargetCapacity field's value.
    +func (s *ModifySpotFleetRequestInput) SetTargetCapacity(v int64) *ModifySpotFleetRequestInput {
    +	s.TargetCapacity = &v
    +	return s
    +}
    +
    +// Contains the output of ModifySpotFleetRequest.
    +type ModifySpotFleetRequestOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Is true if the request succeeds, and an error otherwise.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s ModifySpotFleetRequestOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifySpotFleetRequestOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *ModifySpotFleetRequestOutput) SetReturn(v bool) *ModifySpotFleetRequestOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for ModifySubnetAttribute.
    +type ModifySubnetAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Specify true to indicate that instances launched into the specified subnet
    +	// should be assigned public IP address.
    +	MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"`
    +
    +	// The ID of the subnet.
    +	//
    +	// SubnetId is a required field
    +	SubnetId *string `locationName:"subnetId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifySubnetAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifySubnetAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifySubnetAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifySubnetAttributeInput"}
    +	if s.SubnetId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SubnetId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value.
    +func (s *ModifySubnetAttributeInput) SetMapPublicIpOnLaunch(v *AttributeBooleanValue) *ModifySubnetAttributeInput {
    +	s.MapPublicIpOnLaunch = v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *ModifySubnetAttributeInput) SetSubnetId(v string) *ModifySubnetAttributeInput {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +type ModifySubnetAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifySubnetAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifySubnetAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifyVolumeAttribute.
    +type ModifyVolumeAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the volume should be auto-enabled for I/O operations.
    +	AutoEnableIO *AttributeBooleanValue `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the volume.
    +	//
    +	// VolumeId is a required field
    +	VolumeId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVolumeAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVolumeAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyVolumeAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyVolumeAttributeInput"}
    +	if s.VolumeId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VolumeId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAutoEnableIO sets the AutoEnableIO field's value.
    +func (s *ModifyVolumeAttributeInput) SetAutoEnableIO(v *AttributeBooleanValue) *ModifyVolumeAttributeInput {
    +	s.AutoEnableIO = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ModifyVolumeAttributeInput) SetDryRun(v bool) *ModifyVolumeAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *ModifyVolumeAttributeInput) SetVolumeId(v string) *ModifyVolumeAttributeInput {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +type ModifyVolumeAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVolumeAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVolumeAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifyVpcAttribute.
    +type ModifyVpcAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the instances launched in the VPC get DNS hostnames. If
    +	// enabled, instances in the VPC get DNS hostnames; otherwise, they do not.
    +	//
    +	// You cannot modify the DNS resolution and DNS hostnames attributes in the
    +	// same request. Use separate requests for each attribute. You can only enable
    +	// DNS hostnames if you've enabled DNS support.
    +	EnableDnsHostnames *AttributeBooleanValue `type:"structure"`
    +
    +	// Indicates whether the DNS resolution is supported for the VPC. If enabled,
    +	// queries to the Amazon provided DNS server at the 169.254.169.253 IP address,
    +	// or the reserved IP address at the base of the VPC network range "plus two"
    +	// will succeed. If disabled, the Amazon provided DNS service in the VPC that
    +	// resolves public DNS hostnames to IP addresses is not enabled.
    +	//
    +	// You cannot modify the DNS resolution and DNS hostnames attributes in the
    +	// same request. Use separate requests for each attribute.
    +	EnableDnsSupport *AttributeBooleanValue `type:"structure"`
    +
    +	// The ID of the VPC.
    +	//
    +	// VpcId is a required field
    +	VpcId *string `locationName:"vpcId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVpcAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVpcAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyVpcAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyVpcAttributeInput"}
    +	if s.VpcId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetEnableDnsHostnames sets the EnableDnsHostnames field's value.
    +func (s *ModifyVpcAttributeInput) SetEnableDnsHostnames(v *AttributeBooleanValue) *ModifyVpcAttributeInput {
    +	s.EnableDnsHostnames = v
    +	return s
    +}
    +
    +// SetEnableDnsSupport sets the EnableDnsSupport field's value.
    +func (s *ModifyVpcAttributeInput) SetEnableDnsSupport(v *AttributeBooleanValue) *ModifyVpcAttributeInput {
    +	s.EnableDnsSupport = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *ModifyVpcAttributeInput) SetVpcId(v string) *ModifyVpcAttributeInput {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +type ModifyVpcAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVpcAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVpcAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ModifyVpcEndpoint.
    +type ModifyVpcEndpointInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more route tables IDs to associate with the endpoint.
    +	AddRouteTableIds []*string `locationName:"AddRouteTableId" locationNameList:"item" type:"list"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// A policy document to attach to the endpoint. The policy must be in valid
    +	// JSON format.
    +	PolicyDocument *string `type:"string"`
    +
    +	// One or more route table IDs to disassociate from the endpoint.
    +	RemoveRouteTableIds []*string `locationName:"RemoveRouteTableId" locationNameList:"item" type:"list"`
    +
    +	// Specify true to reset the policy document to the default policy. The default
    +	// policy allows access to the service.
    +	ResetPolicy *bool `type:"boolean"`
    +
    +	// The ID of the endpoint.
    +	//
    +	// VpcEndpointId is a required field
    +	VpcEndpointId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVpcEndpointInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVpcEndpointInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyVpcEndpointInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyVpcEndpointInput"}
    +	if s.VpcEndpointId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcEndpointId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAddRouteTableIds sets the AddRouteTableIds field's value.
    +func (s *ModifyVpcEndpointInput) SetAddRouteTableIds(v []*string) *ModifyVpcEndpointInput {
    +	s.AddRouteTableIds = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ModifyVpcEndpointInput) SetDryRun(v bool) *ModifyVpcEndpointInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetPolicyDocument sets the PolicyDocument field's value.
    +func (s *ModifyVpcEndpointInput) SetPolicyDocument(v string) *ModifyVpcEndpointInput {
    +	s.PolicyDocument = &v
    +	return s
    +}
    +
    +// SetRemoveRouteTableIds sets the RemoveRouteTableIds field's value.
    +func (s *ModifyVpcEndpointInput) SetRemoveRouteTableIds(v []*string) *ModifyVpcEndpointInput {
    +	s.RemoveRouteTableIds = v
    +	return s
    +}
    +
    +// SetResetPolicy sets the ResetPolicy field's value.
    +func (s *ModifyVpcEndpointInput) SetResetPolicy(v bool) *ModifyVpcEndpointInput {
    +	s.ResetPolicy = &v
    +	return s
    +}
    +
    +// SetVpcEndpointId sets the VpcEndpointId field's value.
    +func (s *ModifyVpcEndpointInput) SetVpcEndpointId(v string) *ModifyVpcEndpointInput {
    +	s.VpcEndpointId = &v
    +	return s
    +}
    +
    +// Contains the output of ModifyVpcEndpoint.
    +type ModifyVpcEndpointOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Returns true if the request succeeds; otherwise, it returns an error.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVpcEndpointOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVpcEndpointOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *ModifyVpcEndpointOutput) SetReturn(v bool) *ModifyVpcEndpointOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +type ModifyVpcPeeringConnectionOptionsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The VPC peering connection options for the accepter VPC.
    +	AccepterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the operation, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// The VPC peering connection options for the requester VPC.
    +	RequesterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"`
    +
    +	// The ID of the VPC peering connection.
    +	//
    +	// VpcPeeringConnectionId is a required field
    +	VpcPeeringConnectionId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVpcPeeringConnectionOptionsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVpcPeeringConnectionOptionsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ModifyVpcPeeringConnectionOptionsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ModifyVpcPeeringConnectionOptionsInput"}
    +	if s.VpcPeeringConnectionId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAccepterPeeringConnectionOptions sets the AccepterPeeringConnectionOptions field's value.
    +func (s *ModifyVpcPeeringConnectionOptionsInput) SetAccepterPeeringConnectionOptions(v *PeeringConnectionOptionsRequest) *ModifyVpcPeeringConnectionOptionsInput {
    +	s.AccepterPeeringConnectionOptions = v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ModifyVpcPeeringConnectionOptionsInput) SetDryRun(v bool) *ModifyVpcPeeringConnectionOptionsInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetRequesterPeeringConnectionOptions sets the RequesterPeeringConnectionOptions field's value.
    +func (s *ModifyVpcPeeringConnectionOptionsInput) SetRequesterPeeringConnectionOptions(v *PeeringConnectionOptionsRequest) *ModifyVpcPeeringConnectionOptionsInput {
    +	s.RequesterPeeringConnectionOptions = v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *ModifyVpcPeeringConnectionOptionsInput) SetVpcPeeringConnectionId(v string) *ModifyVpcPeeringConnectionOptionsInput {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
    +type ModifyVpcPeeringConnectionOptionsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the VPC peering connection options for the accepter VPC.
    +	AccepterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"accepterPeeringConnectionOptions" type:"structure"`
    +
    +	// Information about the VPC peering connection options for the requester VPC.
    +	RequesterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"requesterPeeringConnectionOptions" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ModifyVpcPeeringConnectionOptionsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ModifyVpcPeeringConnectionOptionsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAccepterPeeringConnectionOptions sets the AccepterPeeringConnectionOptions field's value.
    +func (s *ModifyVpcPeeringConnectionOptionsOutput) SetAccepterPeeringConnectionOptions(v *PeeringConnectionOptions) *ModifyVpcPeeringConnectionOptionsOutput {
    +	s.AccepterPeeringConnectionOptions = v
    +	return s
    +}
    +
    +// SetRequesterPeeringConnectionOptions sets the RequesterPeeringConnectionOptions field's value.
    +func (s *ModifyVpcPeeringConnectionOptionsOutput) SetRequesterPeeringConnectionOptions(v *PeeringConnectionOptions) *ModifyVpcPeeringConnectionOptionsOutput {
    +	s.RequesterPeeringConnectionOptions = v
    +	return s
    +}
    +
    +// Contains the parameters for MonitorInstances.
    +type MonitorInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more instance IDs.
    +	//
    +	// InstanceIds is a required field
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s MonitorInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s MonitorInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *MonitorInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "MonitorInstancesInput"}
    +	if s.InstanceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *MonitorInstancesInput) SetDryRun(v bool) *MonitorInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *MonitorInstancesInput) SetInstanceIds(v []*string) *MonitorInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// Contains the output of MonitorInstances.
    +type MonitorInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Monitoring information for one or more instances.
    +	InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s MonitorInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s MonitorInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceMonitorings sets the InstanceMonitorings field's value.
    +func (s *MonitorInstancesOutput) SetInstanceMonitorings(v []*InstanceMonitoring) *MonitorInstancesOutput {
    +	s.InstanceMonitorings = v
    +	return s
    +}
    +
    +// Describes the monitoring for the instance.
    +type Monitoring struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether monitoring is enabled for the instance.
    +	State *string `locationName:"state" type:"string" enum:"MonitoringState"`
    +}
    +
    +// String returns the string representation
    +func (s Monitoring) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Monitoring) GoString() string {
    +	return s.String()
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Monitoring) SetState(v string) *Monitoring {
    +	s.State = &v
    +	return s
    +}
    +
    +// Contains the parameters for MoveAddressToVpc.
    +type MoveAddressToVpcInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The Elastic IP address.
    +	//
    +	// PublicIp is a required field
    +	PublicIp *string `locationName:"publicIp" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s MoveAddressToVpcInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s MoveAddressToVpcInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *MoveAddressToVpcInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "MoveAddressToVpcInput"}
    +	if s.PublicIp == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PublicIp"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *MoveAddressToVpcInput) SetDryRun(v bool) *MoveAddressToVpcInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *MoveAddressToVpcInput) SetPublicIp(v string) *MoveAddressToVpcInput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Contains the output of MoveAddressToVpc.
    +type MoveAddressToVpcOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The allocation ID for the Elastic IP address.
    +	AllocationId *string `locationName:"allocationId" type:"string"`
    +
    +	// The status of the move of the IP address.
    +	Status *string `locationName:"status" type:"string" enum:"Status"`
    +}
    +
    +// String returns the string representation
    +func (s MoveAddressToVpcOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s MoveAddressToVpcOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *MoveAddressToVpcOutput) SetAllocationId(v string) *MoveAddressToVpcOutput {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *MoveAddressToVpcOutput) SetStatus(v string) *MoveAddressToVpcOutput {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes the status of a moving Elastic IP address.
    +type MovingAddressStatus struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The status of the Elastic IP address that's being moved to the EC2-VPC platform,
    +	// or restored to the EC2-Classic platform.
    +	MoveStatus *string `locationName:"moveStatus" type:"string" enum:"MoveStatus"`
    +
    +	// The Elastic IP address.
    +	PublicIp *string `locationName:"publicIp" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s MovingAddressStatus) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s MovingAddressStatus) GoString() string {
    +	return s.String()
    +}
    +
    +// SetMoveStatus sets the MoveStatus field's value.
    +func (s *MovingAddressStatus) SetMoveStatus(v string) *MovingAddressStatus {
    +	s.MoveStatus = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *MovingAddressStatus) SetPublicIp(v string) *MovingAddressStatus {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Describes a NAT gateway.
    +type NatGateway struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The date and time the NAT gateway was created.
    +	CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The date and time the NAT gateway was deleted, if applicable.
    +	DeleteTime *time.Time `locationName:"deleteTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// If the NAT gateway could not be created, specifies the error code for the
    +	// failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound
    +	// | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound)
    +	FailureCode *string `locationName:"failureCode" type:"string"`
    +
    +	// If the NAT gateway could not be created, specifies the error message for
    +	// the failure, that corresponds to the error code.
    +	//
    +	//    * For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free
    +	//    addresses to create this NAT gateway"
    +	//
    +	//    * For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway
    +	//    attached"
    +	//
    +	//    * For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx
    +	//    could not be associated with this NAT gateway"
    +	//
    +	//    * For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx
    +	//    is already associated"
    +	//
    +	//    * For InternalError: "Network interface eni-xxxxxxxx, created and used
    +	//    internally by this NAT gateway is in an invalid state. Please try again."
    +	//
    +	//    * For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx
    +	//    does not exist or could not be found."
    +	FailureMessage *string `locationName:"failureMessage" type:"string"`
    +
    +	// Information about the IP addresses and network interface associated with
    +	// the NAT gateway.
    +	NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the NAT gateway.
    +	NatGatewayId *string `locationName:"natGatewayId" type:"string"`
    +
    +	// Reserved. If you need to sustain traffic greater than the documented limits
    +	// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html),
    +	// contact us through the Support Center (https://console.aws.amazon.com/support/home?).
    +	ProvisionedBandwidth *ProvisionedBandwidth `locationName:"provisionedBandwidth" type:"structure"`
    +
    +	// The state of the NAT gateway.
    +	//
    +	//    * pending: The NAT gateway is being created and is not ready to process
    +	//    traffic.
    +	//
    +	//    * failed: The NAT gateway could not be created. Check the failureCode
    +	//    and failureMessage fields for the reason.
    +	//
    +	//    * available: The NAT gateway is able to process traffic. This status remains
    +	//    until you delete the NAT gateway, and does not indicate the health of
    +	//    the NAT gateway.
    +	//
    +	//    * deleting: The NAT gateway is in the process of being terminated and
    +	//    may still be processing traffic.
    +	//
    +	//    * deleted: The NAT gateway has been terminated and is no longer processing
    +	//    traffic.
    +	State *string `locationName:"state" type:"string" enum:"NatGatewayState"`
    +
    +	// The ID of the subnet in which the NAT gateway is located.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +
    +	// The ID of the VPC in which the NAT gateway is located.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s NatGateway) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NatGateway) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCreateTime sets the CreateTime field's value.
    +func (s *NatGateway) SetCreateTime(v time.Time) *NatGateway {
    +	s.CreateTime = &v
    +	return s
    +}
    +
    +// SetDeleteTime sets the DeleteTime field's value.
    +func (s *NatGateway) SetDeleteTime(v time.Time) *NatGateway {
    +	s.DeleteTime = &v
    +	return s
    +}
    +
    +// SetFailureCode sets the FailureCode field's value.
    +func (s *NatGateway) SetFailureCode(v string) *NatGateway {
    +	s.FailureCode = &v
    +	return s
    +}
    +
    +// SetFailureMessage sets the FailureMessage field's value.
    +func (s *NatGateway) SetFailureMessage(v string) *NatGateway {
    +	s.FailureMessage = &v
    +	return s
    +}
    +
    +// SetNatGatewayAddresses sets the NatGatewayAddresses field's value.
    +func (s *NatGateway) SetNatGatewayAddresses(v []*NatGatewayAddress) *NatGateway {
    +	s.NatGatewayAddresses = v
    +	return s
    +}
    +
    +// SetNatGatewayId sets the NatGatewayId field's value.
    +func (s *NatGateway) SetNatGatewayId(v string) *NatGateway {
    +	s.NatGatewayId = &v
    +	return s
    +}
    +
    +// SetProvisionedBandwidth sets the ProvisionedBandwidth field's value.
    +func (s *NatGateway) SetProvisionedBandwidth(v *ProvisionedBandwidth) *NatGateway {
    +	s.ProvisionedBandwidth = v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *NatGateway) SetState(v string) *NatGateway {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *NatGateway) SetSubnetId(v string) *NatGateway {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *NatGateway) SetVpcId(v string) *NatGateway {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes the IP addresses and network interface associated with a NAT gateway.
    +type NatGatewayAddress struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The allocation ID of the Elastic IP address that's associated with the NAT
    +	// gateway.
    +	AllocationId *string `locationName:"allocationId" type:"string"`
    +
    +	// The ID of the network interface associated with the NAT gateway.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// The private IP address associated with the Elastic IP address.
    +	PrivateIp *string `locationName:"privateIp" type:"string"`
    +
    +	// The Elastic IP address associated with the NAT gateway.
    +	PublicIp *string `locationName:"publicIp" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s NatGatewayAddress) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NatGatewayAddress) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *NatGatewayAddress) SetAllocationId(v string) *NatGatewayAddress {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *NatGatewayAddress) SetNetworkInterfaceId(v string) *NatGatewayAddress {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetPrivateIp sets the PrivateIp field's value.
    +func (s *NatGatewayAddress) SetPrivateIp(v string) *NatGatewayAddress {
    +	s.PrivateIp = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *NatGatewayAddress) SetPublicIp(v string) *NatGatewayAddress {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Describes a network ACL.
    +type NetworkAcl struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Any associations between the network ACL and one or more subnets
    +	Associations []*NetworkAclAssociation `locationName:"associationSet" locationNameList:"item" type:"list"`
    +
    +	// One or more entries (rules) in the network ACL.
    +	Entries []*NetworkAclEntry `locationName:"entrySet" locationNameList:"item" type:"list"`
    +
    +	// Indicates whether this is the default network ACL for the VPC.
    +	IsDefault *bool `locationName:"default" type:"boolean"`
    +
    +	// The ID of the network ACL.
    +	NetworkAclId *string `locationName:"networkAclId" type:"string"`
    +
    +	// Any tags assigned to the network ACL.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC for the network ACL.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkAcl) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkAcl) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociations sets the Associations field's value.
    +func (s *NetworkAcl) SetAssociations(v []*NetworkAclAssociation) *NetworkAcl {
    +	s.Associations = v
    +	return s
    +}
    +
    +// SetEntries sets the Entries field's value.
    +func (s *NetworkAcl) SetEntries(v []*NetworkAclEntry) *NetworkAcl {
    +	s.Entries = v
    +	return s
    +}
    +
    +// SetIsDefault sets the IsDefault field's value.
    +func (s *NetworkAcl) SetIsDefault(v bool) *NetworkAcl {
    +	s.IsDefault = &v
    +	return s
    +}
    +
    +// SetNetworkAclId sets the NetworkAclId field's value.
    +func (s *NetworkAcl) SetNetworkAclId(v string) *NetworkAcl {
    +	s.NetworkAclId = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *NetworkAcl) SetTags(v []*Tag) *NetworkAcl {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *NetworkAcl) SetVpcId(v string) *NetworkAcl {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes an association between a network ACL and a subnet.
    +type NetworkAclAssociation struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the association between a network ACL and a subnet.
    +	NetworkAclAssociationId *string `locationName:"networkAclAssociationId" type:"string"`
    +
    +	// The ID of the network ACL.
    +	NetworkAclId *string `locationName:"networkAclId" type:"string"`
    +
    +	// The ID of the subnet.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkAclAssociation) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkAclAssociation) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNetworkAclAssociationId sets the NetworkAclAssociationId field's value.
    +func (s *NetworkAclAssociation) SetNetworkAclAssociationId(v string) *NetworkAclAssociation {
    +	s.NetworkAclAssociationId = &v
    +	return s
    +}
    +
    +// SetNetworkAclId sets the NetworkAclId field's value.
    +func (s *NetworkAclAssociation) SetNetworkAclId(v string) *NetworkAclAssociation {
    +	s.NetworkAclId = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *NetworkAclAssociation) SetSubnetId(v string) *NetworkAclAssociation {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// Describes an entry in a network ACL.
    +type NetworkAclEntry struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The network range to allow or deny, in CIDR notation.
    +	CidrBlock *string `locationName:"cidrBlock" type:"string"`
    +
    +	// Indicates whether the rule is an egress rule (applied to traffic leaving
    +	// the subnet).
    +	Egress *bool `locationName:"egress" type:"boolean"`
    +
    +	// ICMP protocol: The ICMP type and code.
    +	IcmpTypeCode *IcmpTypeCode `locationName:"icmpTypeCode" type:"structure"`
    +
    +	// TCP or UDP protocols: The range of ports the rule applies to.
    +	PortRange *PortRange `locationName:"portRange" type:"structure"`
    +
    +	// The protocol. A value of -1 means all protocols.
    +	Protocol *string `locationName:"protocol" type:"string"`
    +
    +	// Indicates whether to allow or deny the traffic that matches the rule.
    +	RuleAction *string `locationName:"ruleAction" type:"string" enum:"RuleAction"`
    +
    +	// The rule number for the entry. ACL entries are processed in ascending order
    +	// by rule number.
    +	RuleNumber *int64 `locationName:"ruleNumber" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkAclEntry) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkAclEntry) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCidrBlock sets the CidrBlock field's value.
    +func (s *NetworkAclEntry) SetCidrBlock(v string) *NetworkAclEntry {
    +	s.CidrBlock = &v
    +	return s
    +}
    +
    +// SetEgress sets the Egress field's value.
    +func (s *NetworkAclEntry) SetEgress(v bool) *NetworkAclEntry {
    +	s.Egress = &v
    +	return s
    +}
    +
    +// SetIcmpTypeCode sets the IcmpTypeCode field's value.
    +func (s *NetworkAclEntry) SetIcmpTypeCode(v *IcmpTypeCode) *NetworkAclEntry {
    +	s.IcmpTypeCode = v
    +	return s
    +}
    +
    +// SetPortRange sets the PortRange field's value.
    +func (s *NetworkAclEntry) SetPortRange(v *PortRange) *NetworkAclEntry {
    +	s.PortRange = v
    +	return s
    +}
    +
    +// SetProtocol sets the Protocol field's value.
    +func (s *NetworkAclEntry) SetProtocol(v string) *NetworkAclEntry {
    +	s.Protocol = &v
    +	return s
    +}
    +
    +// SetRuleAction sets the RuleAction field's value.
    +func (s *NetworkAclEntry) SetRuleAction(v string) *NetworkAclEntry {
    +	s.RuleAction = &v
    +	return s
    +}
    +
    +// SetRuleNumber sets the RuleNumber field's value.
    +func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry {
    +	s.RuleNumber = &v
    +	return s
    +}
    +
    +// Describes a network interface.
    +type NetworkInterface struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The association information for an Elastic IP associated with the network
    +	// interface.
    +	Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"`
    +
    +	// The network interface attachment.
    +	Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"`
    +
    +	// The Availability Zone.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// A description.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// Any security groups for the network interface.
    +	Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// The type of interface.
    +	InterfaceType *string `locationName:"interfaceType" type:"string" enum:"NetworkInterfaceType"`
    +
    +	// The MAC address.
    +	MacAddress *string `locationName:"macAddress" type:"string"`
    +
    +	// The ID of the network interface.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// The AWS account ID of the owner of the network interface.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// The private DNS name.
    +	PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
    +
    +	// The IP address of the network interface within the subnet.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// The private IP addresses associated with the network interface.
    +	PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the entity that launched the instance on your behalf (for example,
    +	// AWS Management Console or Auto Scaling).
    +	RequesterId *string `locationName:"requesterId" type:"string"`
    +
    +	// Indicates whether the network interface is being managed by AWS.
    +	RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"`
    +
    +	// Indicates whether traffic to or from the instance is validated.
    +	SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"`
    +
    +	// The status of the network interface.
    +	Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"`
    +
    +	// The ID of the subnet.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +
    +	// Any tags assigned to the network interface.
    +	TagSet []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkInterface) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkInterface) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociation sets the Association field's value.
    +func (s *NetworkInterface) SetAssociation(v *NetworkInterfaceAssociation) *NetworkInterface {
    +	s.Association = v
    +	return s
    +}
    +
    +// SetAttachment sets the Attachment field's value.
    +func (s *NetworkInterface) SetAttachment(v *NetworkInterfaceAttachment) *NetworkInterface {
    +	s.Attachment = v
    +	return s
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *NetworkInterface) SetAvailabilityZone(v string) *NetworkInterface {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *NetworkInterface) SetDescription(v string) *NetworkInterface {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *NetworkInterface) SetGroups(v []*GroupIdentifier) *NetworkInterface {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetInterfaceType sets the InterfaceType field's value.
    +func (s *NetworkInterface) SetInterfaceType(v string) *NetworkInterface {
    +	s.InterfaceType = &v
    +	return s
    +}
    +
    +// SetMacAddress sets the MacAddress field's value.
    +func (s *NetworkInterface) SetMacAddress(v string) *NetworkInterface {
    +	s.MacAddress = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *NetworkInterface) SetNetworkInterfaceId(v string) *NetworkInterface {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *NetworkInterface) SetOwnerId(v string) *NetworkInterface {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetPrivateDnsName sets the PrivateDnsName field's value.
    +func (s *NetworkInterface) SetPrivateDnsName(v string) *NetworkInterface {
    +	s.PrivateDnsName = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *NetworkInterface) SetPrivateIpAddress(v string) *NetworkInterface {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
    +func (s *NetworkInterface) SetPrivateIpAddresses(v []*NetworkInterfacePrivateIpAddress) *NetworkInterface {
    +	s.PrivateIpAddresses = v
    +	return s
    +}
    +
    +// SetRequesterId sets the RequesterId field's value.
    +func (s *NetworkInterface) SetRequesterId(v string) *NetworkInterface {
    +	s.RequesterId = &v
    +	return s
    +}
    +
    +// SetRequesterManaged sets the RequesterManaged field's value.
    +func (s *NetworkInterface) SetRequesterManaged(v bool) *NetworkInterface {
    +	s.RequesterManaged = &v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *NetworkInterface) SetSourceDestCheck(v bool) *NetworkInterface {
    +	s.SourceDestCheck = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *NetworkInterface) SetStatus(v string) *NetworkInterface {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *NetworkInterface) SetSubnetId(v string) *NetworkInterface {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetTagSet sets the TagSet field's value.
    +func (s *NetworkInterface) SetTagSet(v []*Tag) *NetworkInterface {
    +	s.TagSet = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes association information for an Elastic IP address.
    +type NetworkInterfaceAssociation struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The allocation ID.
    +	AllocationId *string `locationName:"allocationId" type:"string"`
    +
    +	// The association ID.
    +	AssociationId *string `locationName:"associationId" type:"string"`
    +
    +	// The ID of the Elastic IP address owner.
    +	IpOwnerId *string `locationName:"ipOwnerId" type:"string"`
    +
    +	// The public DNS name.
    +	PublicDnsName *string `locationName:"publicDnsName" type:"string"`
    +
    +	// The address of the Elastic IP address bound to the network interface.
    +	PublicIp *string `locationName:"publicIp" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkInterfaceAssociation) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkInterfaceAssociation) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *NetworkInterfaceAssociation) SetAllocationId(v string) *NetworkInterfaceAssociation {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *NetworkInterfaceAssociation) SetAssociationId(v string) *NetworkInterfaceAssociation {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// SetIpOwnerId sets the IpOwnerId field's value.
    +func (s *NetworkInterfaceAssociation) SetIpOwnerId(v string) *NetworkInterfaceAssociation {
    +	s.IpOwnerId = &v
    +	return s
    +}
    +
    +// SetPublicDnsName sets the PublicDnsName field's value.
    +func (s *NetworkInterfaceAssociation) SetPublicDnsName(v string) *NetworkInterfaceAssociation {
    +	s.PublicDnsName = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *NetworkInterfaceAssociation) SetPublicIp(v string) *NetworkInterfaceAssociation {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Describes a network interface attachment.
    +type NetworkInterfaceAttachment struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The timestamp indicating when the attachment initiated.
    +	AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The ID of the network interface attachment.
    +	AttachmentId *string `locationName:"attachmentId" type:"string"`
    +
    +	// Indicates whether the network interface is deleted when the instance is terminated.
    +	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
    +
    +	// The device index of the network interface attachment on the instance.
    +	DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The AWS account ID of the owner of the instance.
    +	InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"`
    +
    +	// The attachment state.
    +	Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkInterfaceAttachment) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkInterfaceAttachment) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachTime sets the AttachTime field's value.
    +func (s *NetworkInterfaceAttachment) SetAttachTime(v time.Time) *NetworkInterfaceAttachment {
    +	s.AttachTime = &v
    +	return s
    +}
    +
    +// SetAttachmentId sets the AttachmentId field's value.
    +func (s *NetworkInterfaceAttachment) SetAttachmentId(v string) *NetworkInterfaceAttachment {
    +	s.AttachmentId = &v
    +	return s
    +}
    +
    +// SetDeleteOnTermination sets the DeleteOnTermination field's value.
    +func (s *NetworkInterfaceAttachment) SetDeleteOnTermination(v bool) *NetworkInterfaceAttachment {
    +	s.DeleteOnTermination = &v
    +	return s
    +}
    +
    +// SetDeviceIndex sets the DeviceIndex field's value.
    +func (s *NetworkInterfaceAttachment) SetDeviceIndex(v int64) *NetworkInterfaceAttachment {
    +	s.DeviceIndex = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *NetworkInterfaceAttachment) SetInstanceId(v string) *NetworkInterfaceAttachment {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceOwnerId sets the InstanceOwnerId field's value.
    +func (s *NetworkInterfaceAttachment) SetInstanceOwnerId(v string) *NetworkInterfaceAttachment {
    +	s.InstanceOwnerId = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *NetworkInterfaceAttachment) SetStatus(v string) *NetworkInterfaceAttachment {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes an attachment change.
    +type NetworkInterfaceAttachmentChanges struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the network interface attachment.
    +	AttachmentId *string `locationName:"attachmentId" type:"string"`
    +
    +	// Indicates whether the network interface is deleted when the instance is terminated.
    +	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkInterfaceAttachmentChanges) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkInterfaceAttachmentChanges) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachmentId sets the AttachmentId field's value.
    +func (s *NetworkInterfaceAttachmentChanges) SetAttachmentId(v string) *NetworkInterfaceAttachmentChanges {
    +	s.AttachmentId = &v
    +	return s
    +}
    +
    +// SetDeleteOnTermination sets the DeleteOnTermination field's value.
    +func (s *NetworkInterfaceAttachmentChanges) SetDeleteOnTermination(v bool) *NetworkInterfaceAttachmentChanges {
    +	s.DeleteOnTermination = &v
    +	return s
    +}
    +
    +// Describes the private IP address of a network interface.
    +type NetworkInterfacePrivateIpAddress struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The association information for an Elastic IP address associated with the
    +	// network interface.
    +	Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"`
    +
    +	// Indicates whether this IP address is the primary private IP address of the
    +	// network interface.
    +	Primary *bool `locationName:"primary" type:"boolean"`
    +
    +	// The private DNS name.
    +	PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
    +
    +	// The private IP address.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s NetworkInterfacePrivateIpAddress) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NetworkInterfacePrivateIpAddress) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociation sets the Association field's value.
    +func (s *NetworkInterfacePrivateIpAddress) SetAssociation(v *NetworkInterfaceAssociation) *NetworkInterfacePrivateIpAddress {
    +	s.Association = v
    +	return s
    +}
    +
    +// SetPrimary sets the Primary field's value.
    +func (s *NetworkInterfacePrivateIpAddress) SetPrimary(v bool) *NetworkInterfacePrivateIpAddress {
    +	s.Primary = &v
    +	return s
    +}
    +
    +// SetPrivateDnsName sets the PrivateDnsName field's value.
    +func (s *NetworkInterfacePrivateIpAddress) SetPrivateDnsName(v string) *NetworkInterfacePrivateIpAddress {
    +	s.PrivateDnsName = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *NetworkInterfacePrivateIpAddress) SetPrivateIpAddress(v string) *NetworkInterfacePrivateIpAddress {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +type NewDhcpConfiguration struct {
    +	_ struct{} `type:"structure"`
    +
    +	Key *string `locationName:"key" type:"string"`
    +
    +	Values []*string `locationName:"Value" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s NewDhcpConfiguration) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s NewDhcpConfiguration) GoString() string {
    +	return s.String()
    +}
    +
    +// SetKey sets the Key field's value.
    +func (s *NewDhcpConfiguration) SetKey(v string) *NewDhcpConfiguration {
    +	s.Key = &v
    +	return s
    +}
    +
    +// SetValues sets the Values field's value.
    +func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration {
    +	s.Values = v
    +	return s
    +}
    +
    +// Describes the VPC peering connection options.
    +type PeeringConnectionOptions struct {
    +	_ struct{} `type:"structure"`
    +
    +	// If true, enables a local VPC to resolve public DNS hostnames to private IP
    +	// addresses when queried from instances in the peer VPC.
    +	AllowDnsResolutionFromRemoteVpc *bool `locationName:"allowDnsResolutionFromRemoteVpc" type:"boolean"`
    +
    +	// If true, enables outbound communication from an EC2-Classic instance that's
    +	// linked to a local VPC via ClassicLink to instances in a peer VPC.
    +	AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"`
    +
    +	// If true, enables outbound communication from instances in a local VPC to
    +	// an EC2-Classic instance that's linked to a peer VPC via ClassicLink.
    +	AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s PeeringConnectionOptions) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PeeringConnectionOptions) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllowDnsResolutionFromRemoteVpc sets the AllowDnsResolutionFromRemoteVpc field's value.
    +func (s *PeeringConnectionOptions) SetAllowDnsResolutionFromRemoteVpc(v bool) *PeeringConnectionOptions {
    +	s.AllowDnsResolutionFromRemoteVpc = &v
    +	return s
    +}
    +
    +// SetAllowEgressFromLocalClassicLinkToRemoteVpc sets the AllowEgressFromLocalClassicLinkToRemoteVpc field's value.
    +func (s *PeeringConnectionOptions) SetAllowEgressFromLocalClassicLinkToRemoteVpc(v bool) *PeeringConnectionOptions {
    +	s.AllowEgressFromLocalClassicLinkToRemoteVpc = &v
    +	return s
    +}
    +
    +// SetAllowEgressFromLocalVpcToRemoteClassicLink sets the AllowEgressFromLocalVpcToRemoteClassicLink field's value.
    +func (s *PeeringConnectionOptions) SetAllowEgressFromLocalVpcToRemoteClassicLink(v bool) *PeeringConnectionOptions {
    +	s.AllowEgressFromLocalVpcToRemoteClassicLink = &v
    +	return s
    +}
    +
    +// The VPC peering connection options.
    +type PeeringConnectionOptionsRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// If true, enables a local VPC to resolve public DNS hostnames to private IP
    +	// addresses when queried from instances in the peer VPC.
    +	AllowDnsResolutionFromRemoteVpc *bool `type:"boolean"`
    +
    +	// If true, enables outbound communication from an EC2-Classic instance that's
    +	// linked to a local VPC via ClassicLink to instances in a peer VPC.
    +	AllowEgressFromLocalClassicLinkToRemoteVpc *bool `type:"boolean"`
    +
    +	// If true, enables outbound communication from instances in a local VPC to
    +	// an EC2-Classic instance that's linked to a peer VPC via ClassicLink.
    +	AllowEgressFromLocalVpcToRemoteClassicLink *bool `type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s PeeringConnectionOptionsRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PeeringConnectionOptionsRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllowDnsResolutionFromRemoteVpc sets the AllowDnsResolutionFromRemoteVpc field's value.
    +func (s *PeeringConnectionOptionsRequest) SetAllowDnsResolutionFromRemoteVpc(v bool) *PeeringConnectionOptionsRequest {
    +	s.AllowDnsResolutionFromRemoteVpc = &v
    +	return s
    +}
    +
    +// SetAllowEgressFromLocalClassicLinkToRemoteVpc sets the AllowEgressFromLocalClassicLinkToRemoteVpc field's value.
    +func (s *PeeringConnectionOptionsRequest) SetAllowEgressFromLocalClassicLinkToRemoteVpc(v bool) *PeeringConnectionOptionsRequest {
    +	s.AllowEgressFromLocalClassicLinkToRemoteVpc = &v
    +	return s
    +}
    +
    +// SetAllowEgressFromLocalVpcToRemoteClassicLink sets the AllowEgressFromLocalVpcToRemoteClassicLink field's value.
    +func (s *PeeringConnectionOptionsRequest) SetAllowEgressFromLocalVpcToRemoteClassicLink(v bool) *PeeringConnectionOptionsRequest {
    +	s.AllowEgressFromLocalVpcToRemoteClassicLink = &v
    +	return s
    +}
    +
    +// Describes the placement for the instance.
    +type Placement struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The affinity setting for the instance on the Dedicated Host. This parameter
    +	// is not supported for the ImportInstance command.
    +	Affinity *string `locationName:"affinity" type:"string"`
    +
    +	// The Availability Zone of the instance.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The name of the placement group the instance is in (for cluster compute instances).
    +	GroupName *string `locationName:"groupName" type:"string"`
    +
+	// The ID of the Dedicated Host on which the instance resides. This parameter
+	// is not supported for the ImportInstance command.
    +	HostId *string `locationName:"hostId" type:"string"`
    +
    +	// The tenancy of the instance (if the instance is running in a VPC). An instance
    +	// with a tenancy of dedicated runs on single-tenant hardware. The host tenancy
    +	// is not supported for the ImportInstance command.
    +	Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"`
    +}
    +
    +// String returns the string representation
    +func (s Placement) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Placement) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAffinity sets the Affinity field's value.
    +func (s *Placement) SetAffinity(v string) *Placement {
    +	s.Affinity = &v
    +	return s
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *Placement) SetAvailabilityZone(v string) *Placement {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *Placement) SetGroupName(v string) *Placement {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// SetHostId sets the HostId field's value.
    +func (s *Placement) SetHostId(v string) *Placement {
    +	s.HostId = &v
    +	return s
    +}
    +
    +// SetTenancy sets the Tenancy field's value.
    +func (s *Placement) SetTenancy(v string) *Placement {
    +	s.Tenancy = &v
    +	return s
    +}
    +
    +// Describes a placement group.
    +type PlacementGroup struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of the placement group.
    +	GroupName *string `locationName:"groupName" type:"string"`
    +
    +	// The state of the placement group.
    +	State *string `locationName:"state" type:"string" enum:"PlacementGroupState"`
    +
    +	// The placement strategy.
    +	Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"`
    +}
    +
    +// String returns the string representation
    +func (s PlacementGroup) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PlacementGroup) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *PlacementGroup) SetGroupName(v string) *PlacementGroup {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *PlacementGroup) SetState(v string) *PlacementGroup {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetStrategy sets the Strategy field's value.
    +func (s *PlacementGroup) SetStrategy(v string) *PlacementGroup {
    +	s.Strategy = &v
    +	return s
    +}
    +
    +// Describes a range of ports.
    +type PortRange struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The first port in the range.
    +	From *int64 `locationName:"from" type:"integer"`
    +
    +	// The last port in the range.
    +	To *int64 `locationName:"to" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s PortRange) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PortRange) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFrom sets the From field's value.
    +func (s *PortRange) SetFrom(v int64) *PortRange {
    +	s.From = &v
    +	return s
    +}
    +
    +// SetTo sets the To field's value.
    +func (s *PortRange) SetTo(v int64) *PortRange {
    +	s.To = &v
    +	return s
    +}
    +
    +// Describes prefixes for AWS services.
    +type PrefixList struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The IP address range of the AWS service.
    +	Cidrs []*string `locationName:"cidrSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the prefix.
    +	PrefixListId *string `locationName:"prefixListId" type:"string"`
    +
    +	// The name of the prefix.
    +	PrefixListName *string `locationName:"prefixListName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s PrefixList) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PrefixList) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCidrs sets the Cidrs field's value.
    +func (s *PrefixList) SetCidrs(v []*string) *PrefixList {
    +	s.Cidrs = v
    +	return s
    +}
    +
    +// SetPrefixListId sets the PrefixListId field's value.
    +func (s *PrefixList) SetPrefixListId(v string) *PrefixList {
    +	s.PrefixListId = &v
    +	return s
    +}
    +
    +// SetPrefixListName sets the PrefixListName field's value.
    +func (s *PrefixList) SetPrefixListName(v string) *PrefixList {
    +	s.PrefixListName = &v
    +	return s
    +}
    +
    +// The ID of the prefix.
    +type PrefixListId struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the prefix.
    +	PrefixListId *string `locationName:"prefixListId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s PrefixListId) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PrefixListId) GoString() string {
    +	return s.String()
    +}
    +
    +// SetPrefixListId sets the PrefixListId field's value.
    +func (s *PrefixListId) SetPrefixListId(v string) *PrefixListId {
    +	s.PrefixListId = &v
    +	return s
    +}
    +
    +// Describes the price for a Reserved Instance.
    +type PriceSchedule struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The current price schedule, as determined by the term remaining for the Reserved
    +	// Instance in the listing.
    +	//
    +	// A specific price schedule is always in effect, but only one price schedule
    +	// can be active at any time. Take, for example, a Reserved Instance listing
    +	// that has five months remaining in its term. When you specify price schedules
    +	// for five months and two months, this means that schedule 1, covering the
    +	// first three months of the remaining term, will be active during months 5,
    +	// 4, and 3. Then schedule 2, covering the last two months of the term, will
    +	// be active for months 2 and 1.
    +	Active *bool `locationName:"active" type:"boolean"`
    +
    +	// The currency for transacting the Reserved Instance resale. At this time,
    +	// the only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The fixed price for the term.
    +	Price *float64 `locationName:"price" type:"double"`
    +
    +	// The number of months remaining in the reservation. For example, 2 is the
    +	// second to the last month before the capacity reservation expires.
    +	Term *int64 `locationName:"term" type:"long"`
    +}
    +
    +// String returns the string representation
    +func (s PriceSchedule) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PriceSchedule) GoString() string {
    +	return s.String()
    +}
    +
    +// SetActive sets the Active field's value.
    +func (s *PriceSchedule) SetActive(v bool) *PriceSchedule {
    +	s.Active = &v
    +	return s
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *PriceSchedule) SetCurrencyCode(v string) *PriceSchedule {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetPrice sets the Price field's value.
    +func (s *PriceSchedule) SetPrice(v float64) *PriceSchedule {
    +	s.Price = &v
    +	return s
    +}
    +
    +// SetTerm sets the Term field's value.
    +func (s *PriceSchedule) SetTerm(v int64) *PriceSchedule {
    +	s.Term = &v
    +	return s
    +}
    +
    +// Describes the price for a Reserved Instance.
    +type PriceScheduleSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The currency for transacting the Reserved Instance resale. At this time,
    +	// the only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The fixed price for the term.
    +	Price *float64 `locationName:"price" type:"double"`
    +
    +	// The number of months remaining in the reservation. For example, 2 is the
    +	// second to the last month before the capacity reservation expires.
    +	Term *int64 `locationName:"term" type:"long"`
    +}
    +
    +// String returns the string representation
    +func (s PriceScheduleSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PriceScheduleSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *PriceScheduleSpecification) SetCurrencyCode(v string) *PriceScheduleSpecification {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetPrice sets the Price field's value.
    +func (s *PriceScheduleSpecification) SetPrice(v float64) *PriceScheduleSpecification {
    +	s.Price = &v
    +	return s
    +}
    +
    +// SetTerm sets the Term field's value.
    +func (s *PriceScheduleSpecification) SetTerm(v int64) *PriceScheduleSpecification {
    +	s.Term = &v
    +	return s
    +}
    +
    +// Describes a Reserved Instance offering.
    +type PricingDetail struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of reservations available for the price.
    +	Count *int64 `locationName:"count" type:"integer"`
    +
    +	// The price per instance.
    +	Price *float64 `locationName:"price" type:"double"`
    +}
    +
    +// String returns the string representation
    +func (s PricingDetail) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PricingDetail) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCount sets the Count field's value.
    +func (s *PricingDetail) SetCount(v int64) *PricingDetail {
    +	s.Count = &v
    +	return s
    +}
    +
    +// SetPrice sets the Price field's value.
    +func (s *PricingDetail) SetPrice(v float64) *PricingDetail {
    +	s.Price = &v
    +	return s
    +}
    +
    +// Describes a secondary private IP address for a network interface.
    +type PrivateIpAddressSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the private IP address is the primary private IP address.
    +	// Only one IP address can be designated as primary.
    +	Primary *bool `locationName:"primary" type:"boolean"`
    +
+	// The private IP address.
    +	//
    +	// PrivateIpAddress is a required field
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s PrivateIpAddressSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PrivateIpAddressSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *PrivateIpAddressSpecification) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "PrivateIpAddressSpecification"}
    +	if s.PrivateIpAddress == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PrivateIpAddress"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetPrimary sets the Primary field's value.
    +func (s *PrivateIpAddressSpecification) SetPrimary(v bool) *PrivateIpAddressSpecification {
    +	s.Primary = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *PrivateIpAddressSpecification) SetPrivateIpAddress(v string) *PrivateIpAddressSpecification {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// Describes a product code.
    +type ProductCode struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The product code.
    +	ProductCodeId *string `locationName:"productCode" type:"string"`
    +
    +	// The type of product code.
    +	ProductCodeType *string `locationName:"type" type:"string" enum:"ProductCodeValues"`
    +}
    +
    +// String returns the string representation
    +func (s ProductCode) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ProductCode) GoString() string {
    +	return s.String()
    +}
    +
    +// SetProductCodeId sets the ProductCodeId field's value.
    +func (s *ProductCode) SetProductCodeId(v string) *ProductCode {
    +	s.ProductCodeId = &v
    +	return s
    +}
    +
    +// SetProductCodeType sets the ProductCodeType field's value.
    +func (s *ProductCode) SetProductCodeType(v string) *ProductCode {
    +	s.ProductCodeType = &v
    +	return s
    +}
    +
    +// Describes a virtual private gateway propagating route.
    +type PropagatingVgw struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the virtual private gateway (VGW).
    +	GatewayId *string `locationName:"gatewayId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s PropagatingVgw) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PropagatingVgw) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGatewayId sets the GatewayId field's value.
    +func (s *PropagatingVgw) SetGatewayId(v string) *PropagatingVgw {
    +	s.GatewayId = &v
    +	return s
    +}
    +
    +// Reserved. If you need to sustain traffic greater than the documented limits
    +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html),
    +// contact us through the Support Center (https://console.aws.amazon.com/support/home?).
    +type ProvisionedBandwidth struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Reserved. If you need to sustain traffic greater than the documented limits
    +	// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html),
    +	// contact us through the Support Center (https://console.aws.amazon.com/support/home?).
    +	ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// Reserved. If you need to sustain traffic greater than the documented limits
    +	// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html),
    +	// contact us through the Support Center (https://console.aws.amazon.com/support/home?).
    +	Provisioned *string `locationName:"provisioned" type:"string"`
    +
    +	// Reserved. If you need to sustain traffic greater than the documented limits
    +	// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html),
    +	// contact us through the Support Center (https://console.aws.amazon.com/support/home?).
    +	RequestTime *time.Time `locationName:"requestTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// Reserved. If you need to sustain traffic greater than the documented limits
    +	// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html),
    +	// contact us through the Support Center (https://console.aws.amazon.com/support/home?).
    +	Requested *string `locationName:"requested" type:"string"`
    +
    +	// Reserved. If you need to sustain traffic greater than the documented limits
    +	// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html),
    +	// contact us through the Support Center (https://console.aws.amazon.com/support/home?).
    +	Status *string `locationName:"status" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ProvisionedBandwidth) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ProvisionedBandwidth) GoString() string {
    +	return s.String()
    +}
    +
    +// SetProvisionTime sets the ProvisionTime field's value.
    +func (s *ProvisionedBandwidth) SetProvisionTime(v time.Time) *ProvisionedBandwidth {
    +	s.ProvisionTime = &v
    +	return s
    +}
    +
    +// SetProvisioned sets the Provisioned field's value.
    +func (s *ProvisionedBandwidth) SetProvisioned(v string) *ProvisionedBandwidth {
    +	s.Provisioned = &v
    +	return s
    +}
    +
    +// SetRequestTime sets the RequestTime field's value.
    +func (s *ProvisionedBandwidth) SetRequestTime(v time.Time) *ProvisionedBandwidth {
    +	s.RequestTime = &v
    +	return s
    +}
    +
    +// SetRequested sets the Requested field's value.
    +func (s *ProvisionedBandwidth) SetRequested(v string) *ProvisionedBandwidth {
    +	s.Requested = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *ProvisionedBandwidth) SetStatus(v string) *ProvisionedBandwidth {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes the result of the purchase.
    +type Purchase struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The currency in which the UpfrontPrice and HourlyPrice amounts are specified.
    +	// At this time, the only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The duration of the reservation's term in seconds.
    +	Duration *int64 `locationName:"duration" type:"integer"`
    +
    +	// The IDs of the Dedicated Hosts associated with the reservation.
    +	HostIdSet []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the reservation.
    +	HostReservationId *string `locationName:"hostReservationId" type:"string"`
    +
    +	// The hourly price of the reservation per hour.
    +	HourlyPrice *string `locationName:"hourlyPrice" type:"string"`
    +
    +	// The instance family on the Dedicated Host that the reservation can be associated
    +	// with.
    +	InstanceFamily *string `locationName:"instanceFamily" type:"string"`
    +
    +	// The payment option for the reservation.
    +	PaymentOption *string `locationName:"paymentOption" type:"string" enum:"PaymentOption"`
    +
    +	// The upfront price of the reservation.
    +	UpfrontPrice *string `locationName:"upfrontPrice" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Purchase) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Purchase) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *Purchase) SetCurrencyCode(v string) *Purchase {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetDuration sets the Duration field's value.
    +func (s *Purchase) SetDuration(v int64) *Purchase {
    +	s.Duration = &v
    +	return s
    +}
    +
    +// SetHostIdSet sets the HostIdSet field's value.
    +func (s *Purchase) SetHostIdSet(v []*string) *Purchase {
    +	s.HostIdSet = v
    +	return s
    +}
    +
    +// SetHostReservationId sets the HostReservationId field's value.
    +func (s *Purchase) SetHostReservationId(v string) *Purchase {
    +	s.HostReservationId = &v
    +	return s
    +}
    +
    +// SetHourlyPrice sets the HourlyPrice field's value.
    +func (s *Purchase) SetHourlyPrice(v string) *Purchase {
    +	s.HourlyPrice = &v
    +	return s
    +}
    +
    +// SetInstanceFamily sets the InstanceFamily field's value.
    +func (s *Purchase) SetInstanceFamily(v string) *Purchase {
    +	s.InstanceFamily = &v
    +	return s
    +}
    +
    +// SetPaymentOption sets the PaymentOption field's value.
    +func (s *Purchase) SetPaymentOption(v string) *Purchase {
    +	s.PaymentOption = &v
    +	return s
    +}
    +
    +// SetUpfrontPrice sets the UpfrontPrice field's value.
    +func (s *Purchase) SetUpfrontPrice(v string) *Purchase {
    +	s.UpfrontPrice = &v
    +	return s
    +}
    +
    +type PurchaseHostReservationInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure idempotency of the
    +	// request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	ClientToken *string `type:"string"`
    +
    +	// The currency in which the totalUpfrontPrice, LimitPrice, and totalHourlyPrice
    +	// amounts are specified. At this time, the only supported currency is USD.
    +	CurrencyCode *string `type:"string" enum:"CurrencyCodeValues"`
    +
+	// The IDs of the Dedicated Hosts that the reservation will be associated
+	// with.
    +	//
    +	// HostIdSet is a required field
    +	HostIdSet []*string `locationNameList:"item" type:"list" required:"true"`
    +
    +	// The specified limit is checked against the total upfront cost of the reservation
    +	// (calculated as the offering's upfront cost multiplied by the host count).
    +	// If the total upfront cost is greater than the specified price limit, the
    +	// request will fail. This is used to ensure that the purchase does not exceed
    +	// the expected upfront cost of the purchase. At this time, the only supported
    +	// currency is USD. For example, to indicate a limit price of USD 100, specify
    +	// 100.00.
    +	LimitPrice *string `type:"string"`
    +
    +	// The ID of the offering.
    +	//
    +	// OfferingId is a required field
    +	OfferingId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s PurchaseHostReservationInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PurchaseHostReservationInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *PurchaseHostReservationInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "PurchaseHostReservationInput"}
    +	if s.HostIdSet == nil {
    +		invalidParams.Add(request.NewErrParamRequired("HostIdSet"))
    +	}
    +	if s.OfferingId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("OfferingId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *PurchaseHostReservationInput) SetClientToken(v string) *PurchaseHostReservationInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *PurchaseHostReservationInput) SetCurrencyCode(v string) *PurchaseHostReservationInput {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetHostIdSet sets the HostIdSet field's value.
    +func (s *PurchaseHostReservationInput) SetHostIdSet(v []*string) *PurchaseHostReservationInput {
    +	s.HostIdSet = v
    +	return s
    +}
    +
    +// SetLimitPrice sets the LimitPrice field's value.
    +func (s *PurchaseHostReservationInput) SetLimitPrice(v string) *PurchaseHostReservationInput {
    +	s.LimitPrice = &v
    +	return s
    +}
    +
    +// SetOfferingId sets the OfferingId field's value.
    +func (s *PurchaseHostReservationInput) SetOfferingId(v string) *PurchaseHostReservationInput {
    +	s.OfferingId = &v
    +	return s
    +}
    +
    +type PurchaseHostReservationOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure idempotency of the
    +	// request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// The currency in which the totalUpfrontPrice and totalHourlyPrice amounts
    +	// are specified. At this time, the only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// Describes the details of the purchase.
    +	Purchase []*Purchase `locationName:"purchase" type:"list"`
    +
    +	// The total hourly price of the reservation calculated per hour.
    +	TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"`
    +
    +	// The total amount that will be charged to your account when you purchase the
    +	// reservation.
    +	TotalUpfrontPrice *string `locationName:"totalUpfrontPrice" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s PurchaseHostReservationOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PurchaseHostReservationOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *PurchaseHostReservationOutput) SetClientToken(v string) *PurchaseHostReservationOutput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *PurchaseHostReservationOutput) SetCurrencyCode(v string) *PurchaseHostReservationOutput {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetPurchase sets the Purchase field's value.
    +func (s *PurchaseHostReservationOutput) SetPurchase(v []*Purchase) *PurchaseHostReservationOutput {
    +	s.Purchase = v
    +	return s
    +}
    +
    +// SetTotalHourlyPrice sets the TotalHourlyPrice field's value.
    +func (s *PurchaseHostReservationOutput) SetTotalHourlyPrice(v string) *PurchaseHostReservationOutput {
    +	s.TotalHourlyPrice = &v
    +	return s
    +}
    +
    +// SetTotalUpfrontPrice sets the TotalUpfrontPrice field's value.
    +func (s *PurchaseHostReservationOutput) SetTotalUpfrontPrice(v string) *PurchaseHostReservationOutput {
    +	s.TotalUpfrontPrice = &v
    +	return s
    +}
    +
    +// Describes a request to purchase Scheduled Instances.
    +type PurchaseRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of instances.
    +	//
    +	// InstanceCount is a required field
    +	InstanceCount *int64 `type:"integer" required:"true"`
    +
    +	// The purchase token.
    +	//
    +	// PurchaseToken is a required field
    +	PurchaseToken *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s PurchaseRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PurchaseRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *PurchaseRequest) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "PurchaseRequest"}
    +	if s.InstanceCount == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceCount"))
    +	}
    +	if s.PurchaseToken == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PurchaseToken"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *PurchaseRequest) SetInstanceCount(v int64) *PurchaseRequest {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetPurchaseToken sets the PurchaseToken field's value.
    +func (s *PurchaseRequest) SetPurchaseToken(v string) *PurchaseRequest {
    +	s.PurchaseToken = &v
    +	return s
    +}
    +
    +// Contains the parameters for PurchaseReservedInstancesOffering.
    +type PurchaseReservedInstancesOfferingInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The number of Reserved Instances to purchase.
    +	//
    +	// InstanceCount is a required field
    +	InstanceCount *int64 `type:"integer" required:"true"`
    +
    +	// Specified for Reserved Instance Marketplace offerings to limit the total
    +	// order and ensure that the Reserved Instances are not purchased at unexpected
    +	// prices.
    +	LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"`
    +
    +	// The ID of the Reserved Instance offering to purchase.
    +	//
    +	// ReservedInstancesOfferingId is a required field
    +	ReservedInstancesOfferingId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s PurchaseReservedInstancesOfferingInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PurchaseReservedInstancesOfferingInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *PurchaseReservedInstancesOfferingInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "PurchaseReservedInstancesOfferingInput"}
    +	if s.InstanceCount == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceCount"))
    +	}
    +	if s.ReservedInstancesOfferingId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ReservedInstancesOfferingId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *PurchaseReservedInstancesOfferingInput) SetDryRun(v bool) *PurchaseReservedInstancesOfferingInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *PurchaseReservedInstancesOfferingInput) SetInstanceCount(v int64) *PurchaseReservedInstancesOfferingInput {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetLimitPrice sets the LimitPrice field's value.
    +func (s *PurchaseReservedInstancesOfferingInput) SetLimitPrice(v *ReservedInstanceLimitPrice) *PurchaseReservedInstancesOfferingInput {
    +	s.LimitPrice = v
    +	return s
    +}
    +
    +// SetReservedInstancesOfferingId sets the ReservedInstancesOfferingId field's value.
    +func (s *PurchaseReservedInstancesOfferingInput) SetReservedInstancesOfferingId(v string) *PurchaseReservedInstancesOfferingInput {
    +	s.ReservedInstancesOfferingId = &v
    +	return s
    +}
    +
    +// Contains the output of PurchaseReservedInstancesOffering.
    +type PurchaseReservedInstancesOfferingOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The IDs of the purchased Reserved Instances.
    +	ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s PurchaseReservedInstancesOfferingOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PurchaseReservedInstancesOfferingOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservedInstancesId sets the ReservedInstancesId field's value.
    +func (s *PurchaseReservedInstancesOfferingOutput) SetReservedInstancesId(v string) *PurchaseReservedInstancesOfferingOutput {
    +	s.ReservedInstancesId = &v
    +	return s
    +}
    +
    +// Contains the parameters for PurchaseScheduledInstances.
    +type PurchaseScheduledInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier that ensures the idempotency of the request.
    +	// For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	ClientToken *string `type:"string" idempotencyToken:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// One or more purchase requests.
    +	//
    +	// PurchaseRequests is a required field
    +	PurchaseRequests []*PurchaseRequest `locationName:"PurchaseRequest" locationNameList:"PurchaseRequest" min:"1" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s PurchaseScheduledInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PurchaseScheduledInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *PurchaseScheduledInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "PurchaseScheduledInstancesInput"}
    +	if s.PurchaseRequests == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PurchaseRequests"))
    +	}
    +	if s.PurchaseRequests != nil && len(s.PurchaseRequests) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("PurchaseRequests", 1))
    +	}
    +	if s.PurchaseRequests != nil {
    +		for i, v := range s.PurchaseRequests {
    +			if v == nil {
    +				continue
    +			}
    +			if err := v.Validate(); err != nil {
    +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PurchaseRequests", i), err.(request.ErrInvalidParams))
    +			}
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *PurchaseScheduledInstancesInput) SetClientToken(v string) *PurchaseScheduledInstancesInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *PurchaseScheduledInstancesInput) SetDryRun(v bool) *PurchaseScheduledInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetPurchaseRequests sets the PurchaseRequests field's value.
    +func (s *PurchaseScheduledInstancesInput) SetPurchaseRequests(v []*PurchaseRequest) *PurchaseScheduledInstancesInput {
    +	s.PurchaseRequests = v
    +	return s
    +}
    +
    +// Contains the output of PurchaseScheduledInstances.
    +type PurchaseScheduledInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the Scheduled Instances.
    +	ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s PurchaseScheduledInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s PurchaseScheduledInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetScheduledInstanceSet sets the ScheduledInstanceSet field's value.
    +func (s *PurchaseScheduledInstancesOutput) SetScheduledInstanceSet(v []*ScheduledInstance) *PurchaseScheduledInstancesOutput {
    +	s.ScheduledInstanceSet = v
    +	return s
    +}
    +
    +// Contains the parameters for RebootInstances.
    +type RebootInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more instance IDs.
    +	//
    +	// InstanceIds is a required field
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s RebootInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RebootInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RebootInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RebootInstancesInput"}
    +	if s.InstanceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RebootInstancesInput) SetDryRun(v bool) *RebootInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *RebootInstancesInput) SetInstanceIds(v []*string) *RebootInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +type RebootInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s RebootInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RebootInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Describes a recurring charge.
    +type RecurringCharge struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The amount of the recurring charge.
    +	Amount *float64 `locationName:"amount" type:"double"`
    +
    +	// The frequency of the recurring charge.
    +	Frequency *string `locationName:"frequency" type:"string" enum:"RecurringChargeFrequency"`
    +}
    +
    +// String returns the string representation
    +func (s RecurringCharge) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RecurringCharge) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAmount sets the Amount field's value.
    +func (s *RecurringCharge) SetAmount(v float64) *RecurringCharge {
    +	s.Amount = &v
    +	return s
    +}
    +
    +// SetFrequency sets the Frequency field's value.
    +func (s *RecurringCharge) SetFrequency(v string) *RecurringCharge {
    +	s.Frequency = &v
    +	return s
    +}
    +
    +// Describes a region.
    +type Region struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The region service endpoint.
    +	Endpoint *string `locationName:"regionEndpoint" type:"string"`
    +
    +	// The name of the region.
    +	RegionName *string `locationName:"regionName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Region) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Region) GoString() string {
    +	return s.String()
    +}
    +
    +// SetEndpoint sets the Endpoint field's value.
    +func (s *Region) SetEndpoint(v string) *Region {
    +	s.Endpoint = &v
    +	return s
    +}
    +
    +// SetRegionName sets the RegionName field's value.
    +func (s *Region) SetRegionName(v string) *Region {
    +	s.RegionName = &v
    +	return s
    +}
    +
    +// Contains the parameters for RegisterImage.
    +type RegisterImageInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The architecture of the AMI.
    +	//
    +	// Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs,
    +	// the architecture specified in the manifest file.
    +	Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"`
    +
    +	// One or more block device mapping entries.
    +	BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
    +
    +	// A description for your AMI.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Set to true to enable enhanced networking with ENA for the AMI and any instances
    +	// that you launch from the AMI.
    +	//
    +	// This option is supported only for HVM AMIs. Specifying this option with a
    +	// PV AMI can make instances launched from the AMI unreachable.
    +	EnaSupport *bool `locationName:"enaSupport" type:"boolean"`
    +
    +	// The full path to your AMI manifest in Amazon S3 storage.
    +	ImageLocation *string `type:"string"`
    +
    +	// The ID of the kernel.
    +	KernelId *string `locationName:"kernelId" type:"string"`
    +
    +	// A name for your AMI.
    +	//
    +	// Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets
    +	// ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('),
    +	// at-signs (@), or underscores(_)
    +	//
    +	// Name is a required field
    +	Name *string `locationName:"name" type:"string" required:"true"`
    +
    +	// The ID of the RAM disk.
    +	RamdiskId *string `locationName:"ramdiskId" type:"string"`
    +
    +	// The name of the root device (for example, /dev/sda1, or /dev/xvda).
    +	RootDeviceName *string `locationName:"rootDeviceName" type:"string"`
    +
    +	// Set to simple to enable enhanced networking with the Intel 82599 Virtual
    +	// Function interface for the AMI and any instances that you launch from the
    +	// AMI.
    +	//
    +	// There is no way to disable sriovNetSupport at this time.
    +	//
    +	// This option is supported only for HVM AMIs. Specifying this option with a
    +	// PV AMI can make instances launched from the AMI unreachable.
    +	SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"`
    +
    +	// The type of virtualization.
    +	//
    +	// Default: paravirtual
    +	VirtualizationType *string `locationName:"virtualizationType" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s RegisterImageInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RegisterImageInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RegisterImageInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RegisterImageInput"}
    +	if s.Name == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Name"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetArchitecture sets the Architecture field's value.
    +func (s *RegisterImageInput) SetArchitecture(v string) *RegisterImageInput {
    +	s.Architecture = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *RegisterImageInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *RegisterImageInput {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *RegisterImageInput) SetDescription(v string) *RegisterImageInput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RegisterImageInput) SetDryRun(v bool) *RegisterImageInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEnaSupport sets the EnaSupport field's value.
    +func (s *RegisterImageInput) SetEnaSupport(v bool) *RegisterImageInput {
    +	s.EnaSupport = &v
    +	return s
    +}
    +
    +// SetImageLocation sets the ImageLocation field's value.
    +func (s *RegisterImageInput) SetImageLocation(v string) *RegisterImageInput {
    +	s.ImageLocation = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *RegisterImageInput) SetKernelId(v string) *RegisterImageInput {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *RegisterImageInput) SetName(v string) *RegisterImageInput {
    +	s.Name = &v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *RegisterImageInput) SetRamdiskId(v string) *RegisterImageInput {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetRootDeviceName sets the RootDeviceName field's value.
    +func (s *RegisterImageInput) SetRootDeviceName(v string) *RegisterImageInput {
    +	s.RootDeviceName = &v
    +	return s
    +}
    +
    +// SetSriovNetSupport sets the SriovNetSupport field's value.
    +func (s *RegisterImageInput) SetSriovNetSupport(v string) *RegisterImageInput {
    +	s.SriovNetSupport = &v
    +	return s
    +}
    +
    +// SetVirtualizationType sets the VirtualizationType field's value.
    +func (s *RegisterImageInput) SetVirtualizationType(v string) *RegisterImageInput {
    +	s.VirtualizationType = &v
    +	return s
    +}
    +
    +// Contains the output of RegisterImage.
    +type RegisterImageOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the newly registered AMI.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s RegisterImageOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RegisterImageOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *RegisterImageOutput) SetImageId(v string) *RegisterImageOutput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// Contains the parameters for RejectVpcPeeringConnection.
    +type RejectVpcPeeringConnectionInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the VPC peering connection.
    +	//
    +	// VpcPeeringConnectionId is a required field
    +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s RejectVpcPeeringConnectionInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RejectVpcPeeringConnectionInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RejectVpcPeeringConnectionInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RejectVpcPeeringConnectionInput"}
    +	if s.VpcPeeringConnectionId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RejectVpcPeeringConnectionInput) SetDryRun(v bool) *RejectVpcPeeringConnectionInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *RejectVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *RejectVpcPeeringConnectionInput {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
    +// Contains the output of RejectVpcPeeringConnection.
    +type RejectVpcPeeringConnectionOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Returns true if the request succeeds; otherwise, it returns an error.
    +	Return *bool `locationName:"return" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s RejectVpcPeeringConnectionOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RejectVpcPeeringConnectionOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReturn sets the Return field's value.
    +func (s *RejectVpcPeeringConnectionOutput) SetReturn(v bool) *RejectVpcPeeringConnectionOutput {
    +	s.Return = &v
    +	return s
    +}
    +
    +// Contains the parameters for ReleaseAddress.
    +type ReleaseAddressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// [EC2-VPC] The allocation ID. Required for EC2-VPC.
    +	AllocationId *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// [EC2-Classic] The Elastic IP address. Required for EC2-Classic.
    +	PublicIp *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ReleaseAddressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReleaseAddressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllocationId sets the AllocationId field's value.
    +func (s *ReleaseAddressInput) SetAllocationId(v string) *ReleaseAddressInput {
    +	s.AllocationId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ReleaseAddressInput) SetDryRun(v bool) *ReleaseAddressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *ReleaseAddressInput) SetPublicIp(v string) *ReleaseAddressInput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +type ReleaseAddressOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ReleaseAddressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReleaseAddressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ReleaseHosts.
    +type ReleaseHostsInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The IDs of the Dedicated Hosts you want to release.
    +	//
    +	// HostIds is a required field
    +	HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ReleaseHostsInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReleaseHostsInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ReleaseHostsInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ReleaseHostsInput"}
    +	if s.HostIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("HostIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetHostIds sets the HostIds field's value.
    +func (s *ReleaseHostsInput) SetHostIds(v []*string) *ReleaseHostsInput {
    +	s.HostIds = v
    +	return s
    +}
    +
    +// Contains the output of ReleaseHosts.
    +type ReleaseHostsOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The IDs of the Dedicated Hosts that were successfully released.
    +	Successful []*string `locationName:"successful" locationNameList:"item" type:"list"`
    +
    +	// The IDs of the Dedicated Hosts that could not be released, including an error
    +	// message.
    +	Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s ReleaseHostsOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReleaseHostsOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSuccessful sets the Successful field's value.
    +func (s *ReleaseHostsOutput) SetSuccessful(v []*string) *ReleaseHostsOutput {
    +	s.Successful = v
    +	return s
    +}
    +
    +// SetUnsuccessful sets the Unsuccessful field's value.
    +func (s *ReleaseHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ReleaseHostsOutput {
    +	s.Unsuccessful = v
    +	return s
    +}
    +
    +// Contains the parameters for ReplaceNetworkAclAssociation.
    +type ReplaceNetworkAclAssociationInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the current association between the original network ACL and the
    +	// subnet.
    +	//
    +	// AssociationId is a required field
    +	AssociationId *string `locationName:"associationId" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the new network ACL to associate with the subnet.
    +	//
    +	// NetworkAclId is a required field
    +	NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ReplaceNetworkAclAssociationInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReplaceNetworkAclAssociationInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ReplaceNetworkAclAssociationInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ReplaceNetworkAclAssociationInput"}
    +	if s.AssociationId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("AssociationId"))
    +	}
    +	if s.NetworkAclId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkAclId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *ReplaceNetworkAclAssociationInput) SetAssociationId(v string) *ReplaceNetworkAclAssociationInput {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ReplaceNetworkAclAssociationInput) SetDryRun(v bool) *ReplaceNetworkAclAssociationInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetNetworkAclId sets the NetworkAclId field's value.
    +func (s *ReplaceNetworkAclAssociationInput) SetNetworkAclId(v string) *ReplaceNetworkAclAssociationInput {
    +	s.NetworkAclId = &v
    +	return s
    +}
    +
    +// Contains the output of ReplaceNetworkAclAssociation.
    +type ReplaceNetworkAclAssociationOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the new association.
    +	NewAssociationId *string `locationName:"newAssociationId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ReplaceNetworkAclAssociationOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReplaceNetworkAclAssociationOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNewAssociationId sets the NewAssociationId field's value.
    +func (s *ReplaceNetworkAclAssociationOutput) SetNewAssociationId(v string) *ReplaceNetworkAclAssociationOutput {
    +	s.NewAssociationId = &v
    +	return s
    +}
    +
    +// Contains the parameters for ReplaceNetworkAclEntry.
    +type ReplaceNetworkAclEntryInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The network range to allow or deny, in CIDR notation.
    +	//
    +	// CidrBlock is a required field
    +	CidrBlock *string `locationName:"cidrBlock" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Indicates whether to replace the egress rule.
    +	//
    +	// Default: If no value is specified, we replace the ingress rule.
    +	//
    +	// Egress is a required field
    +	Egress *bool `locationName:"egress" type:"boolean" required:"true"`
    +
    +	// ICMP protocol: The ICMP type and code. Required if specifying 1 (ICMP) for
    +	// the protocol.
    +	IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"`
    +
    +	// The ID of the ACL.
    +	//
    +	// NetworkAclId is a required field
    +	NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"`
    +
    +	// TCP or UDP protocols: The range of ports the rule applies to. Required if
    +	// specifying 6 (TCP) or 17 (UDP) for the protocol.
    +	PortRange *PortRange `locationName:"portRange" type:"structure"`
    +
    +	// The IP protocol. You can specify all or -1 to mean all protocols.
    +	//
    +	// Protocol is a required field
    +	Protocol *string `locationName:"protocol" type:"string" required:"true"`
    +
    +	// Indicates whether to allow or deny the traffic that matches the rule.
    +	//
    +	// RuleAction is a required field
    +	RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"`
    +
    +	// The rule number of the entry to replace.
    +	//
    +	// RuleNumber is a required field
    +	RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ReplaceNetworkAclEntryInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReplaceNetworkAclEntryInput) GoString() string {
    +	return s.String()
    +}
    +
+// Validate inspects the fields of the type to determine if they are valid.
+// It only verifies that each required field's pointer is non-nil; it does
+// not check value formats (e.g. whether CidrBlock is syntactically valid
+// CIDR, or RuleNumber is in range). All missing-parameter errors are
+// accumulated and returned together as a single request.ErrInvalidParams.
+func (s *ReplaceNetworkAclEntryInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplaceNetworkAclEntryInput"}
+	if s.CidrBlock == nil {
+		invalidParams.Add(request.NewErrParamRequired("CidrBlock"))
+	}
+	if s.Egress == nil {
+		invalidParams.Add(request.NewErrParamRequired("Egress"))
+	}
+	if s.NetworkAclId == nil {
+		invalidParams.Add(request.NewErrParamRequired("NetworkAclId"))
+	}
+	if s.Protocol == nil {
+		invalidParams.Add(request.NewErrParamRequired("Protocol"))
+	}
+	if s.RuleAction == nil {
+		invalidParams.Add(request.NewErrParamRequired("RuleAction"))
+	}
+	if s.RuleNumber == nil {
+		invalidParams.Add(request.NewErrParamRequired("RuleNumber"))
+	}
+
+	// nil is returned on success, never a typed-nil error value.
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
    +
    +// SetCidrBlock sets the CidrBlock field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetCidrBlock(v string) *ReplaceNetworkAclEntryInput {
    +	s.CidrBlock = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetDryRun(v bool) *ReplaceNetworkAclEntryInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEgress sets the Egress field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetEgress(v bool) *ReplaceNetworkAclEntryInput {
    +	s.Egress = &v
    +	return s
    +}
    +
    +// SetIcmpTypeCode sets the IcmpTypeCode field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetIcmpTypeCode(v *IcmpTypeCode) *ReplaceNetworkAclEntryInput {
    +	s.IcmpTypeCode = v
    +	return s
    +}
    +
    +// SetNetworkAclId sets the NetworkAclId field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetNetworkAclId(v string) *ReplaceNetworkAclEntryInput {
    +	s.NetworkAclId = &v
    +	return s
    +}
    +
    +// SetPortRange sets the PortRange field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetPortRange(v *PortRange) *ReplaceNetworkAclEntryInput {
    +	s.PortRange = v
    +	return s
    +}
    +
    +// SetProtocol sets the Protocol field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetProtocol(v string) *ReplaceNetworkAclEntryInput {
    +	s.Protocol = &v
    +	return s
    +}
    +
    +// SetRuleAction sets the RuleAction field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetRuleAction(v string) *ReplaceNetworkAclEntryInput {
    +	s.RuleAction = &v
    +	return s
    +}
    +
    +// SetRuleNumber sets the RuleNumber field's value.
    +func (s *ReplaceNetworkAclEntryInput) SetRuleNumber(v int64) *ReplaceNetworkAclEntryInput {
    +	s.RuleNumber = &v
    +	return s
    +}
    +
+// ReplaceNetworkAclEntryOutput is the response for ReplaceNetworkAclEntry.
+// The operation returns no data beyond the request status, so the struct
+// is intentionally empty.
+type ReplaceNetworkAclEntryOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReplaceNetworkAclEntryOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceNetworkAclEntryOutput) GoString() string {
+	return s.String()
+}
    +
    +// Contains the parameters for ReplaceRoute.
    +type ReplaceRouteInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR address block used for the destination match. The value you provide
    +	// must match the CIDR of an existing route in the table.
    +	//
    +	// DestinationCidrBlock is a required field
    +	DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of an Internet gateway or virtual private gateway.
    +	GatewayId *string `locationName:"gatewayId" type:"string"`
    +
    +	// The ID of a NAT instance in your VPC.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The ID of a NAT gateway.
    +	NatGatewayId *string `locationName:"natGatewayId" type:"string"`
    +
    +	// The ID of a network interface.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// The ID of the route table.
    +	//
    +	// RouteTableId is a required field
    +	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
    +
    +	// The ID of a VPC peering connection.
    +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ReplaceRouteInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReplaceRouteInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ReplaceRouteInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ReplaceRouteInput"}
    +	if s.DestinationCidrBlock == nil {
    +		invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
    +	}
    +	if s.RouteTableId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
    +func (s *ReplaceRouteInput) SetDestinationCidrBlock(v string) *ReplaceRouteInput {
    +	s.DestinationCidrBlock = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ReplaceRouteInput) SetDryRun(v bool) *ReplaceRouteInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetGatewayId sets the GatewayId field's value.
    +func (s *ReplaceRouteInput) SetGatewayId(v string) *ReplaceRouteInput {
    +	s.GatewayId = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *ReplaceRouteInput) SetInstanceId(v string) *ReplaceRouteInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetNatGatewayId sets the NatGatewayId field's value.
    +func (s *ReplaceRouteInput) SetNatGatewayId(v string) *ReplaceRouteInput {
    +	s.NatGatewayId = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *ReplaceRouteInput) SetNetworkInterfaceId(v string) *ReplaceRouteInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *ReplaceRouteInput) SetRouteTableId(v string) *ReplaceRouteInput {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *ReplaceRouteInput) SetVpcPeeringConnectionId(v string) *ReplaceRouteInput {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
+// ReplaceRouteOutput is the response for ReplaceRoute. The operation
+// returns no data beyond the request status, so the struct is
+// intentionally empty.
+type ReplaceRouteOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReplaceRouteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplaceRouteOutput) GoString() string {
+	return s.String()
+}
    +
    +// Contains the parameters for ReplaceRouteTableAssociation.
    +type ReplaceRouteTableAssociationInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The association ID.
    +	//
    +	// AssociationId is a required field
    +	AssociationId *string `locationName:"associationId" type:"string" required:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the new route table to associate with the subnet.
    +	//
    +	// RouteTableId is a required field
    +	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ReplaceRouteTableAssociationInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReplaceRouteTableAssociationInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ReplaceRouteTableAssociationInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ReplaceRouteTableAssociationInput"}
    +	if s.AssociationId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("AssociationId"))
    +	}
    +	if s.RouteTableId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAssociationId sets the AssociationId field's value.
    +func (s *ReplaceRouteTableAssociationInput) SetAssociationId(v string) *ReplaceRouteTableAssociationInput {
    +	s.AssociationId = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ReplaceRouteTableAssociationInput) SetDryRun(v bool) *ReplaceRouteTableAssociationInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *ReplaceRouteTableAssociationInput) SetRouteTableId(v string) *ReplaceRouteTableAssociationInput {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +// Contains the output of ReplaceRouteTableAssociation.
    +type ReplaceRouteTableAssociationOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the new association.
    +	NewAssociationId *string `locationName:"newAssociationId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ReplaceRouteTableAssociationOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReplaceRouteTableAssociationOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetNewAssociationId sets the NewAssociationId field's value.
    +func (s *ReplaceRouteTableAssociationOutput) SetNewAssociationId(v string) *ReplaceRouteTableAssociationOutput {
    +	s.NewAssociationId = &v
    +	return s
    +}
    +
    +// Contains the parameters for ReportInstanceStatus.
    +type ReportInstanceStatusInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Descriptive text about the health state of your instance.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The time at which the reported instance health state ended.
    +	EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// One or more instances.
    +	//
    +	// Instances is a required field
    +	Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"`
    +
    +	// One or more reason codes that describes the health state of your instance.
    +	//
    +	//    * instance-stuck-in-state: My instance is stuck in a state.
    +	//
    +	//    * unresponsive: My instance is unresponsive.
    +	//
    +	//    * not-accepting-credentials: My instance is not accepting my credentials.
    +	//
    +	//    * password-not-available: A password is not available for my instance.
    +	//
    +	//    * performance-network: My instance is experiencing performance problems
    +	//    which I believe are network related.
    +	//
    +	//    * performance-instance-store: My instance is experiencing performance
    +	//    problems which I believe are related to the instance stores.
    +	//
    +	//    * performance-ebs-volume: My instance is experiencing performance problems
    +	//    which I believe are related to an EBS volume.
    +	//
    +	//    * performance-other: My instance is experiencing performance problems.
    +	//
    +	//    * other: [explain using the description parameter]
    +	//
    +	// ReasonCodes is a required field
    +	ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"`
    +
    +	// The time at which the reported instance health state began.
    +	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The status of all instances listed.
    +	//
    +	// Status is a required field
    +	Status *string `locationName:"status" type:"string" required:"true" enum:"ReportStatusType"`
    +}
    +
    +// String returns the string representation
    +func (s ReportInstanceStatusInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReportInstanceStatusInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ReportInstanceStatusInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ReportInstanceStatusInput"}
    +	if s.Instances == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Instances"))
    +	}
    +	if s.ReasonCodes == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ReasonCodes"))
    +	}
    +	if s.Status == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Status"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *ReportInstanceStatusInput) SetDescription(v string) *ReportInstanceStatusInput {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ReportInstanceStatusInput) SetDryRun(v bool) *ReportInstanceStatusInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEndTime sets the EndTime field's value.
    +func (s *ReportInstanceStatusInput) SetEndTime(v time.Time) *ReportInstanceStatusInput {
    +	s.EndTime = &v
    +	return s
    +}
    +
    +// SetInstances sets the Instances field's value.
    +func (s *ReportInstanceStatusInput) SetInstances(v []*string) *ReportInstanceStatusInput {
    +	s.Instances = v
    +	return s
    +}
    +
    +// SetReasonCodes sets the ReasonCodes field's value.
    +func (s *ReportInstanceStatusInput) SetReasonCodes(v []*string) *ReportInstanceStatusInput {
    +	s.ReasonCodes = v
    +	return s
    +}
    +
    +// SetStartTime sets the StartTime field's value.
    +func (s *ReportInstanceStatusInput) SetStartTime(v time.Time) *ReportInstanceStatusInput {
    +	s.StartTime = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *ReportInstanceStatusInput) SetStatus(v string) *ReportInstanceStatusInput {
    +	s.Status = &v
    +	return s
    +}
    +
+// ReportInstanceStatusOutput is the response for ReportInstanceStatus.
+// The operation returns no data beyond the request status, so the struct
+// is intentionally empty.
+type ReportInstanceStatusOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReportInstanceStatusOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReportInstanceStatusOutput) GoString() string {
+	return s.String()
+}
    +
    +// Contains the parameters for RequestSpotFleet.
    +type RequestSpotFleetInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The configuration for the Spot fleet request.
    +	//
    +	// SpotFleetRequestConfig is a required field
    +	SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s RequestSpotFleetInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RequestSpotFleetInput) GoString() string {
    +	return s.String()
    +}
    +
+// Validate inspects the fields of the type to determine if they are valid.
+// Beyond the required-field presence check, it recursively validates the
+// nested SpotFleetRequestConfig when set; the err.(request.ErrInvalidParams)
+// assertion relies on nested Validate methods only ever returning that
+// concrete error type.
+func (s *RequestSpotFleetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RequestSpotFleetInput"}
+	if s.SpotFleetRequestConfig == nil {
+		invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestConfig"))
+	}
+	if s.SpotFleetRequestConfig != nil {
+		if err := s.SpotFleetRequestConfig.Validate(); err != nil {
+			invalidParams.AddNested("SpotFleetRequestConfig", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RequestSpotFleetInput) SetDryRun(v bool) *RequestSpotFleetInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestConfig sets the SpotFleetRequestConfig field's value.
    +func (s *RequestSpotFleetInput) SetSpotFleetRequestConfig(v *SpotFleetRequestConfigData) *RequestSpotFleetInput {
    +	s.SpotFleetRequestConfig = v
    +	return s
    +}
    +
    +// Contains the output of RequestSpotFleet.
    +type RequestSpotFleetOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s RequestSpotFleetOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RequestSpotFleetOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value.
    +func (s *RequestSpotFleetOutput) SetSpotFleetRequestId(v string) *RequestSpotFleetOutput {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// Contains the parameters for RequestSpotInstances.
    +type RequestSpotInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The user-specified name for a logical grouping of bids.
    +	//
    +	// When you specify an Availability Zone group in a Spot Instance request, all
    +	// Spot instances in the request are launched in the same Availability Zone.
    +	// Instance proximity is maintained with this parameter, but the choice of Availability
    +	// Zone is not. The group applies only to bids for Spot Instances of the same
    +	// instance type. Any additional Spot instance requests that are specified with
    +	// the same Availability Zone group name are launched in that same Availability
    +	// Zone, as long as at least one instance from the group is still active.
    +	//
    +	// If there is no active instance running in the Availability Zone group that
    +	// you specify for a new Spot instance request (all instances are terminated,
    +	// the bid is expired, or the bid falls below current market), then Amazon EC2
    +	// launches the instance in any Availability Zone where the constraint can be
    +	// met. Consequently, the subsequent set of Spot instances could be placed in
    +	// a different zone from the original request, even if you specified the same
    +	// Availability Zone group.
    +	//
    +	// Default: Instances are launched in any available Availability Zone.
    +	AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"`
    +
    +	// The required duration for the Spot instances (also known as Spot blocks),
    +	// in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300,
    +	// or 360).
    +	//
    +	// The duration period starts as soon as your Spot instance receives its instance
    +	// ID. At the end of the duration period, Amazon EC2 marks the Spot instance
    +	// for termination and provides a Spot instance termination notice, which gives
    +	// the instance a two-minute warning before it terminates.
    +	//
    +	// Note that you can't specify an Availability Zone group or a launch group
    +	// if you specify a duration.
    +	BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"`
    +
    +	// Unique, case-sensitive identifier that you provide to ensure the idempotency
    +	// of the request. For more information, see How to Ensure Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The maximum number of Spot instances to launch.
    +	//
    +	// Default: 1
    +	InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
    +
    +	// The instance launch group. Launch groups are Spot instances that launch together
    +	// and terminate together.
    +	//
    +	// Default: Instances are launched and terminated individually
    +	LaunchGroup *string `locationName:"launchGroup" type:"string"`
    +
    +	// Describes the launch specification for an instance.
    +	LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"`
    +
    +	// The maximum hourly price (bid) for any Spot instance launched to fulfill
    +	// the request.
    +	//
    +	// SpotPrice is a required field
    +	SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"`
    +
    +	// The Spot instance request type.
    +	//
    +	// Default: one-time
    +	Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"`
    +
    +	// The start date of the request. If this is a one-time request, the request
    +	// becomes active at this date and time and remains active until all instances
    +	// launch, the request expires, or the request is canceled. If the request is
    +	// persistent, the request becomes active at this date and time and remains
    +	// active until it expires or is canceled.
    +	//
    +	// Default: The request is effective indefinitely.
    +	ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The end date of the request. If this is a one-time request, the request remains
    +	// active until all instances launch, the request is canceled, or this date
    +	// is reached. If the request is persistent, it remains active until it is canceled
    +	// or this date and time is reached.
    +	//
    +	// Default: The request is effective indefinitely.
    +	ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s RequestSpotInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RequestSpotInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
+// Validate inspects the fields of the type to determine if they are valid.
+// SpotPrice is the only required field; the optional LaunchSpecification
+// is recursively validated when present. No format checking is done on
+// SpotPrice itself (it is sent as an opaque string).
+func (s *RequestSpotInstancesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RequestSpotInstancesInput"}
+	if s.SpotPrice == nil {
+		invalidParams.Add(request.NewErrParamRequired("SpotPrice"))
+	}
+	if s.LaunchSpecification != nil {
+		if err := s.LaunchSpecification.Validate(); err != nil {
+			invalidParams.AddNested("LaunchSpecification", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
    +
    +// SetAvailabilityZoneGroup sets the AvailabilityZoneGroup field's value.
    +func (s *RequestSpotInstancesInput) SetAvailabilityZoneGroup(v string) *RequestSpotInstancesInput {
    +	s.AvailabilityZoneGroup = &v
    +	return s
    +}
    +
    +// SetBlockDurationMinutes sets the BlockDurationMinutes field's value.
    +func (s *RequestSpotInstancesInput) SetBlockDurationMinutes(v int64) *RequestSpotInstancesInput {
    +	s.BlockDurationMinutes = &v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *RequestSpotInstancesInput) SetClientToken(v string) *RequestSpotInstancesInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RequestSpotInstancesInput) SetDryRun(v bool) *RequestSpotInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *RequestSpotInstancesInput) SetInstanceCount(v int64) *RequestSpotInstancesInput {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetLaunchGroup sets the LaunchGroup field's value.
    +func (s *RequestSpotInstancesInput) SetLaunchGroup(v string) *RequestSpotInstancesInput {
    +	s.LaunchGroup = &v
    +	return s
    +}
    +
    +// SetLaunchSpecification sets the LaunchSpecification field's value.
    +func (s *RequestSpotInstancesInput) SetLaunchSpecification(v *RequestSpotLaunchSpecification) *RequestSpotInstancesInput {
    +	s.LaunchSpecification = v
    +	return s
    +}
    +
    +// SetSpotPrice sets the SpotPrice field's value.
    +func (s *RequestSpotInstancesInput) SetSpotPrice(v string) *RequestSpotInstancesInput {
    +	s.SpotPrice = &v
    +	return s
    +}
    +
    +// SetType sets the Type field's value.
    +func (s *RequestSpotInstancesInput) SetType(v string) *RequestSpotInstancesInput {
    +	s.Type = &v
    +	return s
    +}
    +
    +// SetValidFrom sets the ValidFrom field's value.
    +func (s *RequestSpotInstancesInput) SetValidFrom(v time.Time) *RequestSpotInstancesInput {
    +	s.ValidFrom = &v
    +	return s
    +}
    +
    +// SetValidUntil sets the ValidUntil field's value.
    +func (s *RequestSpotInstancesInput) SetValidUntil(v time.Time) *RequestSpotInstancesInput {
    +	s.ValidUntil = &v
    +	return s
    +}
    +
    +// Contains the output of RequestSpotInstances.
    +type RequestSpotInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more Spot instance requests.
    +	SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s RequestSpotInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RequestSpotInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetSpotInstanceRequests sets the SpotInstanceRequests field's value.
    +func (s *RequestSpotInstancesOutput) SetSpotInstanceRequests(v []*SpotInstanceRequest) *RequestSpotInstancesOutput {
    +	s.SpotInstanceRequests = v
    +	return s
    +}
    +
+// Describes the launch specification for an instance.
+type RequestSpotLaunchSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Deprecated.
+	AddressingType *string `locationName:"addressingType" type:"string"`
+
+	// One or more block device mapping entries.
+	//
+	// Although you can specify encrypted EBS volumes in this block device mapping
+	// for your Spot Instances, these volumes are not encrypted.
+	BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
+
+	// Indicates whether the instance is optimized for EBS I/O. This optimization
+	// provides dedicated throughput to Amazon EBS and an optimized configuration
+	// stack to provide optimal EBS I/O performance. This optimization isn't available
+	// with all instance types. Additional usage charges apply when using an EBS
+	// Optimized instance.
+	//
+	// Default: false
+	EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
+
+	// The IAM instance profile.
+	IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
+
+	// The ID of the AMI.
+	ImageId *string `locationName:"imageId" type:"string"`
+
+	// The instance type.
+	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
+
+	// The ID of the kernel.
+	KernelId *string `locationName:"kernelId" type:"string"`
+
+	// The name of the key pair.
+	KeyName *string `locationName:"keyName" type:"string"`
+
+	// Describes the monitoring for the instance.
+	Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"`
+
+	// One or more network interfaces.
+	NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"NetworkInterface" locationNameList:"item" type:"list"`
+
+	// The placement information for the instance.
+	Placement *SpotPlacement `locationName:"placement" type:"structure"`
+
+	// The ID of the RAM disk.
+	RamdiskId *string `locationName:"ramdiskId" type:"string"`
+
+	// One or more security group IDs.
+	SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"`
+
+	// One or more security group names (as opposed to IDs; see the EC2 API
+	// reference for this shape — confirm which form applies in a VPC).
+	SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"`
+
+	// The ID of the subnet in which to launch the instance.
+	SubnetId *string `locationName:"subnetId" type:"string"`
+
+	// The user data to make available to the instances. If you are using an AWS
+	// SDK or command line tool, Base64-encoding is performed for you, and you can
+	// load the text from a file. Otherwise, you must provide Base64-encoded text.
+	UserData *string `locationName:"userData" type:"string"`
+}
    +
    +// String returns the string representation
    +func (s RequestSpotLaunchSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RequestSpotLaunchSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RequestSpotLaunchSpecification) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RequestSpotLaunchSpecification"}
    +	if s.Monitoring != nil {
    +		if err := s.Monitoring.Validate(); err != nil {
    +			invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams))
    +		}
    +	}
    +	if s.NetworkInterfaces != nil {
    +		for i, v := range s.NetworkInterfaces {
    +			if v == nil {
    +				continue
    +			}
    +			if err := v.Validate(); err != nil {
    +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
    +			}
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAddressingType sets the AddressingType field's value.
    +func (s *RequestSpotLaunchSpecification) SetAddressingType(v string) *RequestSpotLaunchSpecification {
    +	s.AddressingType = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *RequestSpotLaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *RequestSpotLaunchSpecification {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *RequestSpotLaunchSpecification) SetEbsOptimized(v bool) *RequestSpotLaunchSpecification {
    +	s.EbsOptimized = &v
    +	return s
    +}
    +
    +// SetIamInstanceProfile sets the IamInstanceProfile field's value.
    +func (s *RequestSpotLaunchSpecification) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *RequestSpotLaunchSpecification {
    +	s.IamInstanceProfile = v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *RequestSpotLaunchSpecification) SetImageId(v string) *RequestSpotLaunchSpecification {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *RequestSpotLaunchSpecification) SetInstanceType(v string) *RequestSpotLaunchSpecification {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *RequestSpotLaunchSpecification) SetKernelId(v string) *RequestSpotLaunchSpecification {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *RequestSpotLaunchSpecification) SetKeyName(v string) *RequestSpotLaunchSpecification {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// SetMonitoring sets the Monitoring field's value.
    +func (s *RequestSpotLaunchSpecification) SetMonitoring(v *RunInstancesMonitoringEnabled) *RequestSpotLaunchSpecification {
    +	s.Monitoring = v
    +	return s
    +}
    +
    +// SetNetworkInterfaces sets the NetworkInterfaces field's value.
    +func (s *RequestSpotLaunchSpecification) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *RequestSpotLaunchSpecification {
    +	s.NetworkInterfaces = v
    +	return s
    +}
    +
    +// SetPlacement sets the Placement field's value.
    +func (s *RequestSpotLaunchSpecification) SetPlacement(v *SpotPlacement) *RequestSpotLaunchSpecification {
    +	s.Placement = v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *RequestSpotLaunchSpecification) SetRamdiskId(v string) *RequestSpotLaunchSpecification {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetSecurityGroupIds sets the SecurityGroupIds field's value.
    +func (s *RequestSpotLaunchSpecification) SetSecurityGroupIds(v []*string) *RequestSpotLaunchSpecification {
    +	s.SecurityGroupIds = v
    +	return s
    +}
    +
    +// SetSecurityGroups sets the SecurityGroups field's value.
    +func (s *RequestSpotLaunchSpecification) SetSecurityGroups(v []*string) *RequestSpotLaunchSpecification {
    +	s.SecurityGroups = v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *RequestSpotLaunchSpecification) SetSubnetId(v string) *RequestSpotLaunchSpecification {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetUserData sets the UserData field's value.
    +func (s *RequestSpotLaunchSpecification) SetUserData(v string) *RequestSpotLaunchSpecification {
    +	s.UserData = &v
    +	return s
    +}
    +
    +// Describes a reservation.
    +type Reservation struct {
    +	_ struct{} `type:"structure"`
    +
    +	// [EC2-Classic only] One or more security groups.
    +	Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// One or more instances.
    +	Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the AWS account that owns the reservation.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// The ID of the requester that launched the instances on your behalf (for example,
    +	// AWS Management Console or Auto Scaling).
    +	RequesterId *string `locationName:"requesterId" type:"string"`
    +
    +	// The ID of the reservation.
    +	ReservationId *string `locationName:"reservationId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Reservation) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Reservation) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *Reservation) SetGroups(v []*GroupIdentifier) *Reservation {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetInstances sets the Instances field's value.
    +func (s *Reservation) SetInstances(v []*Instance) *Reservation {
    +	s.Instances = v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *Reservation) SetOwnerId(v string) *Reservation {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetRequesterId sets the RequesterId field's value.
    +func (s *Reservation) SetRequesterId(v string) *Reservation {
    +	s.RequesterId = &v
    +	return s
    +}
    +
    +// SetReservationId sets the ReservationId field's value.
    +func (s *Reservation) SetReservationId(v string) *Reservation {
    +	s.ReservationId = &v
    +	return s
    +}
    +
    +// The cost associated with the Reserved Instance.
    +type ReservationValue struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The hourly rate of the reservation.
    +	HourlyPrice *string `locationName:"hourlyPrice" type:"string"`
    +
    +	// The balance of the total value (the sum of remainingUpfrontValue + hourlyPrice
    +	// * number of hours remaining).
    +	RemainingTotalValue *string `locationName:"remainingTotalValue" type:"string"`
    +
    +	// The remaining upfront cost of the reservation.
    +	RemainingUpfrontValue *string `locationName:"remainingUpfrontValue" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ReservationValue) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservationValue) GoString() string {
    +	return s.String()
    +}
    +
    +// SetHourlyPrice sets the HourlyPrice field's value.
    +func (s *ReservationValue) SetHourlyPrice(v string) *ReservationValue {
    +	s.HourlyPrice = &v
    +	return s
    +}
    +
    +// SetRemainingTotalValue sets the RemainingTotalValue field's value.
    +func (s *ReservationValue) SetRemainingTotalValue(v string) *ReservationValue {
    +	s.RemainingTotalValue = &v
    +	return s
    +}
    +
    +// SetRemainingUpfrontValue sets the RemainingUpfrontValue field's value.
    +func (s *ReservationValue) SetRemainingUpfrontValue(v string) *ReservationValue {
    +	s.RemainingUpfrontValue = &v
    +	return s
    +}
    +
    +// Describes the limit price of a Reserved Instance offering.
    +type ReservedInstanceLimitPrice struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Used for Reserved Instance Marketplace offerings. Specifies the limit price
    +	// on the total order (instanceCount * price).
    +	Amount *float64 `locationName:"amount" type:"double"`
    +
    +	// The currency in which the limitPrice amount is specified. At this time, the
    +	// only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstanceLimitPrice) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstanceLimitPrice) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAmount sets the Amount field's value.
    +func (s *ReservedInstanceLimitPrice) SetAmount(v float64) *ReservedInstanceLimitPrice {
    +	s.Amount = &v
    +	return s
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *ReservedInstanceLimitPrice) SetCurrencyCode(v string) *ReservedInstanceLimitPrice {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// The total value of the Convertible Reserved Instance.
    +type ReservedInstanceReservationValue struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The total value of the Convertible Reserved Instance that you are exchanging.
    +	ReservationValue *ReservationValue `locationName:"reservationValue" type:"structure"`
    +
    +	// The ID of the Convertible Reserved Instance that you are exchanging.
    +	ReservedInstanceId *string `locationName:"reservedInstanceId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstanceReservationValue) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstanceReservationValue) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservationValue sets the ReservationValue field's value.
    +func (s *ReservedInstanceReservationValue) SetReservationValue(v *ReservationValue) *ReservedInstanceReservationValue {
    +	s.ReservationValue = v
    +	return s
    +}
    +
    +// SetReservedInstanceId sets the ReservedInstanceId field's value.
    +func (s *ReservedInstanceReservationValue) SetReservedInstanceId(v string) *ReservedInstanceReservationValue {
    +	s.ReservedInstanceId = &v
    +	return s
    +}
    +
    +// Describes a Reserved Instance.
    +type ReservedInstances struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone in which the Reserved Instance can be used.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The currency of the Reserved Instance. It's specified using ISO 4217 standard
    +	// currency codes. At this time, the only supported currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The duration of the Reserved Instance, in seconds.
    +	Duration *int64 `locationName:"duration" type:"long"`
    +
    +	// The time when the Reserved Instance expires.
    +	End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The purchase price of the Reserved Instance.
    +	FixedPrice *float64 `locationName:"fixedPrice" type:"float"`
    +
    +	// The number of reservations purchased.
    +	InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
    +
    +	// The tenancy of the instance.
    +	InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"`
    +
    +	// The instance type on which the Reserved Instance can be used.
    +	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
    +
    +	// The offering class of the Reserved Instance.
    +	OfferingClass *string `locationName:"offeringClass" type:"string" enum:"OfferingClassType"`
    +
    +	// The Reserved Instance offering type.
    +	OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"`
    +
    +	// The Reserved Instance product platform description.
    +	ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"`
    +
    +	// The recurring charge tag assigned to the resource.
    +	RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"`
    +
    +	// The ID of the Reserved Instance.
    +	ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
    +
    +	// The scope of the Reserved Instance.
    +	Scope *string `locationName:"scope" type:"string" enum:"scope"`
    +
    +	// The date and time the Reserved Instance started.
    +	Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The state of the Reserved Instance purchase.
    +	State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"`
    +
    +	// Any tags assigned to the resource.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The usage price of the Reserved Instance, per hour.
    +	UsagePrice *float64 `locationName:"usagePrice" type:"float"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstances) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstances) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *ReservedInstances) SetAvailabilityZone(v string) *ReservedInstances {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *ReservedInstances) SetCurrencyCode(v string) *ReservedInstances {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetDuration sets the Duration field's value.
    +func (s *ReservedInstances) SetDuration(v int64) *ReservedInstances {
    +	s.Duration = &v
    +	return s
    +}
    +
    +// SetEnd sets the End field's value.
    +func (s *ReservedInstances) SetEnd(v time.Time) *ReservedInstances {
    +	s.End = &v
    +	return s
    +}
    +
    +// SetFixedPrice sets the FixedPrice field's value.
    +func (s *ReservedInstances) SetFixedPrice(v float64) *ReservedInstances {
    +	s.FixedPrice = &v
    +	return s
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *ReservedInstances) SetInstanceCount(v int64) *ReservedInstances {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetInstanceTenancy sets the InstanceTenancy field's value.
    +func (s *ReservedInstances) SetInstanceTenancy(v string) *ReservedInstances {
    +	s.InstanceTenancy = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ReservedInstances) SetInstanceType(v string) *ReservedInstances {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetOfferingClass sets the OfferingClass field's value.
    +func (s *ReservedInstances) SetOfferingClass(v string) *ReservedInstances {
    +	s.OfferingClass = &v
    +	return s
    +}
    +
    +// SetOfferingType sets the OfferingType field's value.
    +func (s *ReservedInstances) SetOfferingType(v string) *ReservedInstances {
    +	s.OfferingType = &v
    +	return s
    +}
    +
    +// SetProductDescription sets the ProductDescription field's value.
    +func (s *ReservedInstances) SetProductDescription(v string) *ReservedInstances {
    +	s.ProductDescription = &v
    +	return s
    +}
    +
    +// SetRecurringCharges sets the RecurringCharges field's value.
    +func (s *ReservedInstances) SetRecurringCharges(v []*RecurringCharge) *ReservedInstances {
    +	s.RecurringCharges = v
    +	return s
    +}
    +
    +// SetReservedInstancesId sets the ReservedInstancesId field's value.
    +func (s *ReservedInstances) SetReservedInstancesId(v string) *ReservedInstances {
    +	s.ReservedInstancesId = &v
    +	return s
    +}
    +
    +// SetScope sets the Scope field's value.
    +func (s *ReservedInstances) SetScope(v string) *ReservedInstances {
    +	s.Scope = &v
    +	return s
    +}
    +
    +// SetStart sets the Start field's value.
    +func (s *ReservedInstances) SetStart(v time.Time) *ReservedInstances {
    +	s.Start = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *ReservedInstances) SetState(v string) *ReservedInstances {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *ReservedInstances) SetTags(v []*Tag) *ReservedInstances {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetUsagePrice sets the UsagePrice field's value.
    +func (s *ReservedInstances) SetUsagePrice(v float64) *ReservedInstances {
    +	s.UsagePrice = &v
    +	return s
    +}
    +
    +// Describes the configuration settings for the modified Reserved Instances.
    +type ReservedInstancesConfiguration struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone for the modified Reserved Instances.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The number of modified Reserved Instances.
    +	InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
    +
    +	// The instance type for the modified Reserved Instances.
    +	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
    +
    +	// The network platform of the modified Reserved Instances, which is either
    +	// EC2-Classic or EC2-VPC.
    +	Platform *string `locationName:"platform" type:"string"`
    +
    +	// Whether the Reserved Instance is standard or convertible.
    +	Scope *string `locationName:"scope" type:"string" enum:"scope"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstancesConfiguration) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstancesConfiguration) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *ReservedInstancesConfiguration) SetAvailabilityZone(v string) *ReservedInstancesConfiguration {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *ReservedInstancesConfiguration) SetInstanceCount(v int64) *ReservedInstancesConfiguration {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ReservedInstancesConfiguration) SetInstanceType(v string) *ReservedInstancesConfiguration {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetPlatform sets the Platform field's value.
    +func (s *ReservedInstancesConfiguration) SetPlatform(v string) *ReservedInstancesConfiguration {
    +	s.Platform = &v
    +	return s
    +}
    +
    +// SetScope sets the Scope field's value.
    +func (s *ReservedInstancesConfiguration) SetScope(v string) *ReservedInstancesConfiguration {
    +	s.Scope = &v
    +	return s
    +}
    +
    +// Describes the ID of a Reserved Instance.
    +type ReservedInstancesId struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the Reserved Instance.
    +	ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstancesId) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstancesId) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservedInstancesId sets the ReservedInstancesId field's value.
    +func (s *ReservedInstancesId) SetReservedInstancesId(v string) *ReservedInstancesId {
    +	s.ReservedInstancesId = &v
    +	return s
    +}
    +
    +// Describes a Reserved Instance listing.
    +type ReservedInstancesListing struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A unique, case-sensitive key supplied by the client to ensure that the request
    +	// is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// The time the listing was created.
    +	CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The number of instances in this state.
    +	InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"`
    +
    +	// The price of the Reserved Instance listing.
    +	PriceSchedules []*PriceSchedule `locationName:"priceSchedules" locationNameList:"item" type:"list"`
    +
    +	// The ID of the Reserved Instance.
    +	ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
    +
    +	// The ID of the Reserved Instance listing.
    +	ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"`
    +
    +	// The status of the Reserved Instance listing.
    +	Status *string `locationName:"status" type:"string" enum:"ListingStatus"`
    +
    +	// The reason for the current status of the Reserved Instance listing. The response
    +	// can be blank.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +
    +	// Any tags assigned to the resource.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The last modified timestamp of the listing.
    +	UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstancesListing) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstancesListing) GoString() string {
    +	return s.String()
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *ReservedInstancesListing) SetClientToken(v string) *ReservedInstancesListing {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetCreateDate sets the CreateDate field's value.
    +func (s *ReservedInstancesListing) SetCreateDate(v time.Time) *ReservedInstancesListing {
    +	s.CreateDate = &v
    +	return s
    +}
    +
    +// SetInstanceCounts sets the InstanceCounts field's value.
    +func (s *ReservedInstancesListing) SetInstanceCounts(v []*InstanceCount) *ReservedInstancesListing {
    +	s.InstanceCounts = v
    +	return s
    +}
    +
    +// SetPriceSchedules sets the PriceSchedules field's value.
    +func (s *ReservedInstancesListing) SetPriceSchedules(v []*PriceSchedule) *ReservedInstancesListing {
    +	s.PriceSchedules = v
    +	return s
    +}
    +
    +// SetReservedInstancesId sets the ReservedInstancesId field's value.
    +func (s *ReservedInstancesListing) SetReservedInstancesId(v string) *ReservedInstancesListing {
    +	s.ReservedInstancesId = &v
    +	return s
    +}
    +
    +// SetReservedInstancesListingId sets the ReservedInstancesListingId field's value.
    +func (s *ReservedInstancesListing) SetReservedInstancesListingId(v string) *ReservedInstancesListing {
    +	s.ReservedInstancesListingId = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *ReservedInstancesListing) SetStatus(v string) *ReservedInstancesListing {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetStatusMessage sets the StatusMessage field's value.
    +func (s *ReservedInstancesListing) SetStatusMessage(v string) *ReservedInstancesListing {
    +	s.StatusMessage = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *ReservedInstancesListing) SetTags(v []*Tag) *ReservedInstancesListing {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetUpdateDate sets the UpdateDate field's value.
    +func (s *ReservedInstancesListing) SetUpdateDate(v time.Time) *ReservedInstancesListing {
    +	s.UpdateDate = &v
    +	return s
    +}
    +
    +// Describes a Reserved Instance modification.
    +type ReservedInstancesModification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A unique, case-sensitive key supplied by the client to ensure that the request
    +	// is idempotent. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// The time when the modification request was created.
    +	CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The time for the modification to become effective.
    +	EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// Contains target configurations along with their corresponding new Reserved
    +	// Instance IDs.
    +	ModificationResults []*ReservedInstancesModificationResult `locationName:"modificationResultSet" locationNameList:"item" type:"list"`
    +
    +	// The IDs of one or more Reserved Instances.
    +	ReservedInstancesIds []*ReservedInstancesId `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"`
    +
    +	// A unique ID for the Reserved Instance modification.
    +	ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"`
    +
    +	// The status of the Reserved Instances modification request.
    +	Status *string `locationName:"status" type:"string"`
    +
    +	// The reason for the status.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +
    +	// The time when the modification request was last updated.
    +	UpdateDate *time.Time `locationName:"updateDate" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstancesModification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstancesModification) GoString() string {
    +	return s.String()
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *ReservedInstancesModification) SetClientToken(v string) *ReservedInstancesModification {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetCreateDate sets the CreateDate field's value.
    +func (s *ReservedInstancesModification) SetCreateDate(v time.Time) *ReservedInstancesModification {
    +	s.CreateDate = &v
    +	return s
    +}
    +
    +// SetEffectiveDate sets the EffectiveDate field's value.
    +func (s *ReservedInstancesModification) SetEffectiveDate(v time.Time) *ReservedInstancesModification {
    +	s.EffectiveDate = &v
    +	return s
    +}
    +
    +// SetModificationResults sets the ModificationResults field's value.
    +func (s *ReservedInstancesModification) SetModificationResults(v []*ReservedInstancesModificationResult) *ReservedInstancesModification {
    +	s.ModificationResults = v
    +	return s
    +}
    +
    +// SetReservedInstancesIds sets the ReservedInstancesIds field's value.
    +func (s *ReservedInstancesModification) SetReservedInstancesIds(v []*ReservedInstancesId) *ReservedInstancesModification {
    +	s.ReservedInstancesIds = v
    +	return s
    +}
    +
    +// SetReservedInstancesModificationId sets the ReservedInstancesModificationId field's value.
    +func (s *ReservedInstancesModification) SetReservedInstancesModificationId(v string) *ReservedInstancesModification {
    +	s.ReservedInstancesModificationId = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *ReservedInstancesModification) SetStatus(v string) *ReservedInstancesModification {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetStatusMessage sets the StatusMessage field's value.
    +func (s *ReservedInstancesModification) SetStatusMessage(v string) *ReservedInstancesModification {
    +	s.StatusMessage = &v
    +	return s
    +}
    +
    +// SetUpdateDate sets the UpdateDate field's value.
    +func (s *ReservedInstancesModification) SetUpdateDate(v time.Time) *ReservedInstancesModification {
    +	s.UpdateDate = &v
    +	return s
    +}
    +
    +// Describes the modification request/s.
    +type ReservedInstancesModificationResult struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID for the Reserved Instances that were created as part of the modification
    +	// request. This field is only available when the modification is fulfilled.
    +	ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"`
    +
    +	// The target Reserved Instances configurations supplied as part of the modification
    +	// request.
    +	TargetConfiguration *ReservedInstancesConfiguration `locationName:"targetConfiguration" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstancesModificationResult) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstancesModificationResult) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservedInstancesId sets the ReservedInstancesId field's value.
    +func (s *ReservedInstancesModificationResult) SetReservedInstancesId(v string) *ReservedInstancesModificationResult {
    +	s.ReservedInstancesId = &v
    +	return s
    +}
    +
    +// SetTargetConfiguration sets the TargetConfiguration field's value.
    +func (s *ReservedInstancesModificationResult) SetTargetConfiguration(v *ReservedInstancesConfiguration) *ReservedInstancesModificationResult {
    +	s.TargetConfiguration = v
    +	return s
    +}
    +
    +// Describes a Reserved Instance offering.
    +type ReservedInstancesOffering struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone in which the Reserved Instance can be used.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The currency of the Reserved Instance offering you are purchasing. It's specified
    +	// using ISO 4217 standard currency codes. At this time, the only supported
    +	// currency is USD.
    +	CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"`
    +
    +	// The duration of the Reserved Instance, in seconds.
    +	Duration *int64 `locationName:"duration" type:"long"`
    +
    +	// The purchase price of the Reserved Instance.
    +	FixedPrice *float64 `locationName:"fixedPrice" type:"float"`
    +
    +	// The tenancy of the instance.
    +	InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"`
    +
    +	// The instance type on which the Reserved Instance can be used.
    +	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
    +
    +	// Indicates whether the offering is available through the Reserved Instance
    +	// Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering,
    +	// this is true.
    +	Marketplace *bool `locationName:"marketplace" type:"boolean"`
    +
    +	// If convertible it can be exchanged for Reserved Instances of the same or
    +	// higher monetary value, with different configurations. If standard, it is
    +	// not possible to perform an exchange.
    +	OfferingClass *string `locationName:"offeringClass" type:"string" enum:"OfferingClassType"`
    +
    +	// The Reserved Instance offering type.
    +	OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"`
    +
    +	// The pricing details of the Reserved Instance offering.
    +	PricingDetails []*PricingDetail `locationName:"pricingDetailsSet" locationNameList:"item" type:"list"`
    +
    +	// The Reserved Instance product platform description.
    +	ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"`
    +
    +	// The recurring charge tag assigned to the resource.
    +	RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"`
    +
    +	// The ID of the Reserved Instance offering. This is the offering ID used in
    +	// GetReservedInstancesExchangeQuote to confirm that an exchange can be made.
    +	ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"`
    +
    +	// Whether the Reserved Instance is applied to instances in a region or an Availability
    +	// Zone.
    +	Scope *string `locationName:"scope" type:"string" enum:"scope"`
    +
    +	// The usage price of the Reserved Instance, per hour.
    +	UsagePrice *float64 `locationName:"usagePrice" type:"float"`
    +}
    +
    +// String returns the string representation
    +func (s ReservedInstancesOffering) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ReservedInstancesOffering) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *ReservedInstancesOffering) SetAvailabilityZone(v string) *ReservedInstancesOffering {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetCurrencyCode sets the CurrencyCode field's value.
    +func (s *ReservedInstancesOffering) SetCurrencyCode(v string) *ReservedInstancesOffering {
    +	s.CurrencyCode = &v
    +	return s
    +}
    +
    +// SetDuration sets the Duration field's value.
    +func (s *ReservedInstancesOffering) SetDuration(v int64) *ReservedInstancesOffering {
    +	s.Duration = &v
    +	return s
    +}
    +
    +// SetFixedPrice sets the FixedPrice field's value.
    +func (s *ReservedInstancesOffering) SetFixedPrice(v float64) *ReservedInstancesOffering {
    +	s.FixedPrice = &v
    +	return s
    +}
    +
    +// SetInstanceTenancy sets the InstanceTenancy field's value.
    +func (s *ReservedInstancesOffering) SetInstanceTenancy(v string) *ReservedInstancesOffering {
    +	s.InstanceTenancy = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ReservedInstancesOffering) SetInstanceType(v string) *ReservedInstancesOffering {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetMarketplace sets the Marketplace field's value.
    +func (s *ReservedInstancesOffering) SetMarketplace(v bool) *ReservedInstancesOffering {
    +	s.Marketplace = &v
    +	return s
    +}
    +
    +// SetOfferingClass sets the OfferingClass field's value.
    +func (s *ReservedInstancesOffering) SetOfferingClass(v string) *ReservedInstancesOffering {
    +	s.OfferingClass = &v
    +	return s
    +}
    +
    +// SetOfferingType sets the OfferingType field's value.
    +func (s *ReservedInstancesOffering) SetOfferingType(v string) *ReservedInstancesOffering {
    +	s.OfferingType = &v
    +	return s
    +}
    +
    +// SetPricingDetails sets the PricingDetails field's value.
    +func (s *ReservedInstancesOffering) SetPricingDetails(v []*PricingDetail) *ReservedInstancesOffering {
    +	s.PricingDetails = v
    +	return s
    +}
    +
    +// SetProductDescription sets the ProductDescription field's value.
    +func (s *ReservedInstancesOffering) SetProductDescription(v string) *ReservedInstancesOffering {
    +	s.ProductDescription = &v
    +	return s
    +}
    +
    +// SetRecurringCharges sets the RecurringCharges field's value.
    +func (s *ReservedInstancesOffering) SetRecurringCharges(v []*RecurringCharge) *ReservedInstancesOffering {
    +	s.RecurringCharges = v
    +	return s
    +}
    +
    +// SetReservedInstancesOfferingId sets the ReservedInstancesOfferingId field's value.
    +func (s *ReservedInstancesOffering) SetReservedInstancesOfferingId(v string) *ReservedInstancesOffering {
    +	s.ReservedInstancesOfferingId = &v
    +	return s
    +}
    +
    +// SetScope sets the Scope field's value.
    +func (s *ReservedInstancesOffering) SetScope(v string) *ReservedInstancesOffering {
    +	s.Scope = &v
    +	return s
    +}
    +
    +// SetUsagePrice sets the UsagePrice field's value.
    +func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesOffering {
    +	s.UsagePrice = &v
    +	return s
    +}
    +
    +// Contains the parameters for ResetImageAttribute.
    +type ResetImageAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attribute to reset (currently you can only reset the launch permission
    +	// attribute).
    +	//
    +	// Attribute is a required field
    +	Attribute *string `type:"string" required:"true" enum:"ResetImageAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the AMI.
    +	//
    +	// ImageId is a required field
    +	ImageId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ResetImageAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetImageAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ResetImageAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ResetImageAttributeInput"}
    +	if s.Attribute == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Attribute"))
    +	}
    +	if s.ImageId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ImageId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *ResetImageAttributeInput) SetAttribute(v string) *ResetImageAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ResetImageAttributeInput) SetDryRun(v bool) *ResetImageAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *ResetImageAttributeInput) SetImageId(v string) *ResetImageAttributeInput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +type ResetImageAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ResetImageAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetImageAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ResetInstanceAttribute.
    +type ResetInstanceAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attribute to reset.
    +	//
    +	// You can only reset the following attributes: kernel | ramdisk | sourceDestCheck.
    +	// To change an instance attribute, use ModifyInstanceAttribute.
    +	//
    +	// Attribute is a required field
    +	Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the instance.
    +	//
    +	// InstanceId is a required field
    +	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ResetInstanceAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetInstanceAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ResetInstanceAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ResetInstanceAttributeInput"}
    +	if s.Attribute == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Attribute"))
    +	}
    +	if s.InstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *ResetInstanceAttributeInput) SetAttribute(v string) *ResetInstanceAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ResetInstanceAttributeInput) SetDryRun(v bool) *ResetInstanceAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *ResetInstanceAttributeInput) SetInstanceId(v string) *ResetInstanceAttributeInput {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +type ResetInstanceAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ResetInstanceAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetInstanceAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ResetNetworkInterfaceAttribute.
    +type ResetNetworkInterfaceAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the network interface.
    +	//
    +	// NetworkInterfaceId is a required field
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
    +
    +	// The source/destination checking attribute. Resets the value to true.
    +	SourceDestCheck *string `locationName:"sourceDestCheck" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ResetNetworkInterfaceAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetNetworkInterfaceAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ResetNetworkInterfaceAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ResetNetworkInterfaceAttributeInput"}
    +	if s.NetworkInterfaceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ResetNetworkInterfaceAttributeInput) SetDryRun(v bool) *ResetNetworkInterfaceAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *ResetNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) *ResetNetworkInterfaceAttributeInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetSourceDestCheck sets the SourceDestCheck field's value.
    +func (s *ResetNetworkInterfaceAttributeInput) SetSourceDestCheck(v string) *ResetNetworkInterfaceAttributeInput {
    +	s.SourceDestCheck = &v
    +	return s
    +}
    +
    +type ResetNetworkInterfaceAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ResetNetworkInterfaceAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetNetworkInterfaceAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for ResetSnapshotAttribute.
    +type ResetSnapshotAttributeInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The attribute to reset. Currently, only the attribute for permission to create
    +	// volumes can be reset.
    +	//
    +	// Attribute is a required field
    +	Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The ID of the snapshot.
    +	//
    +	// SnapshotId is a required field
    +	SnapshotId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s ResetSnapshotAttributeInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetSnapshotAttributeInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ResetSnapshotAttributeInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ResetSnapshotAttributeInput"}
    +	if s.Attribute == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Attribute"))
    +	}
    +	if s.SnapshotId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SnapshotId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAttribute sets the Attribute field's value.
    +func (s *ResetSnapshotAttributeInput) SetAttribute(v string) *ResetSnapshotAttributeInput {
    +	s.Attribute = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *ResetSnapshotAttributeInput) SetDryRun(v bool) *ResetSnapshotAttributeInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *ResetSnapshotAttributeInput) SetSnapshotId(v string) *ResetSnapshotAttributeInput {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +type ResetSnapshotAttributeOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s ResetSnapshotAttributeOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ResetSnapshotAttributeOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for RestoreAddressToClassic.
    +type RestoreAddressToClassicInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The Elastic IP address.
    +	//
    +	// PublicIp is a required field
    +	PublicIp *string `locationName:"publicIp" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s RestoreAddressToClassicInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RestoreAddressToClassicInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RestoreAddressToClassicInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RestoreAddressToClassicInput"}
    +	if s.PublicIp == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PublicIp"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RestoreAddressToClassicInput) SetDryRun(v bool) *RestoreAddressToClassicInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *RestoreAddressToClassicInput) SetPublicIp(v string) *RestoreAddressToClassicInput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// Contains the output of RestoreAddressToClassic.
    +type RestoreAddressToClassicOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Elastic IP address.
    +	PublicIp *string `locationName:"publicIp" type:"string"`
    +
    +	// The move status for the IP address.
    +	Status *string `locationName:"status" type:"string" enum:"Status"`
    +}
    +
    +// String returns the string representation
    +func (s RestoreAddressToClassicOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RestoreAddressToClassicOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetPublicIp sets the PublicIp field's value.
    +func (s *RestoreAddressToClassicOutput) SetPublicIp(v string) *RestoreAddressToClassicOutput {
    +	s.PublicIp = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToClassicOutput {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Contains the parameters for RevokeSecurityGroupEgress.
    +type RevokeSecurityGroupEgressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR IP address range. We recommend that you specify the CIDR range in
    +	// a set of IP permissions instead.
    +	CidrIp *string `locationName:"cidrIp" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The start of port range for the TCP and UDP protocols, or an ICMP type number.
    +	// We recommend that you specify the port range in a set of IP permissions instead.
    +	FromPort *int64 `locationName:"fromPort" type:"integer"`
    +
    +	// The ID of the security group.
    +	//
    +	// GroupId is a required field
    +	GroupId *string `locationName:"groupId" type:"string" required:"true"`
    +
    +	// A set of IP permissions. You can't specify a destination security group and
    +	// a CIDR IP address range.
    +	IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"`
    +
    +	// The IP protocol name or number. We recommend that you specify the protocol
    +	// in a set of IP permissions instead.
    +	IpProtocol *string `locationName:"ipProtocol" type:"string"`
    +
    +	// The name of a destination security group. To revoke outbound access to a
    +	// destination security group, we recommend that you use a set of IP permissions
    +	// instead.
    +	SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"`
    +
    +	// The AWS account number for a destination security group. To revoke outbound
    +	// access to a destination security group, we recommend that you use a set of
    +	// IP permissions instead.
    +	SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"`
    +
    +	// The end of port range for the TCP and UDP protocols, or an ICMP type number.
    +	// We recommend that you specify the port range in a set of IP permissions instead.
    +	ToPort *int64 `locationName:"toPort" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s RevokeSecurityGroupEgressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RevokeSecurityGroupEgressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RevokeSecurityGroupEgressInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RevokeSecurityGroupEgressInput"}
    +	if s.GroupId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("GroupId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetCidrIp sets the CidrIp field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetCidrIp(v string) *RevokeSecurityGroupEgressInput {
    +	s.CidrIp = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetDryRun(v bool) *RevokeSecurityGroupEgressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFromPort sets the FromPort field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetFromPort(v int64) *RevokeSecurityGroupEgressInput {
    +	s.FromPort = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetGroupId(v string) *RevokeSecurityGroupEgressInput {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetIpPermissions sets the IpPermissions field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetIpPermissions(v []*IpPermission) *RevokeSecurityGroupEgressInput {
    +	s.IpPermissions = v
    +	return s
    +}
    +
    +// SetIpProtocol sets the IpProtocol field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetIpProtocol(v string) *RevokeSecurityGroupEgressInput {
    +	s.IpProtocol = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetSourceSecurityGroupName(v string) *RevokeSecurityGroupEgressInput {
    +	s.SourceSecurityGroupName = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetSourceSecurityGroupOwnerId(v string) *RevokeSecurityGroupEgressInput {
    +	s.SourceSecurityGroupOwnerId = &v
    +	return s
    +}
    +
    +// SetToPort sets the ToPort field's value.
    +func (s *RevokeSecurityGroupEgressInput) SetToPort(v int64) *RevokeSecurityGroupEgressInput {
    +	s.ToPort = &v
    +	return s
    +}
    +
    +type RevokeSecurityGroupEgressOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s RevokeSecurityGroupEgressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RevokeSecurityGroupEgressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for RevokeSecurityGroupIngress.
    +type RevokeSecurityGroupIngressInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR IP address range. You can't specify this parameter when specifying
    +	// a source security group.
    +	CidrIp *string `type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// The start of port range for the TCP and UDP protocols, or an ICMP type number.
    +	// For the ICMP type number, use -1 to specify all ICMP types.
    +	FromPort *int64 `type:"integer"`
    +
    +	// The ID of the security group. Required for a security group in a nondefault
    +	// VPC.
    +	GroupId *string `type:"string"`
    +
    +	// [EC2-Classic, default VPC] The name of the security group.
    +	GroupName *string `type:"string"`
    +
    +	// A set of IP permissions. You can't specify a source security group and a
    +	// CIDR IP address range.
    +	IpPermissions []*IpPermission `locationNameList:"item" type:"list"`
    +
    +	// The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)).
    +	// Use -1 to specify all.
    +	IpProtocol *string `type:"string"`
    +
    +	// [EC2-Classic, default VPC] The name of the source security group. You can't
    +	// specify this parameter in combination with the following parameters: the
    +	// CIDR IP address range, the start of the port range, the IP protocol, and
    +	// the end of the port range. For EC2-VPC, the source security group must be
    +	// in the same VPC. To revoke a specific rule for an IP protocol and port range,
    +	// use a set of IP permissions instead.
    +	SourceSecurityGroupName *string `type:"string"`
    +
    +	// [EC2-Classic] The AWS account ID of the source security group, if the source
    +	// security group is in a different account. You can't specify this parameter
    +	// in combination with the following parameters: the CIDR IP address range,
    +	// the IP protocol, the start of the port range, and the end of the port range.
    +	// To revoke a specific rule for an IP protocol and port range, use a set of
    +	// IP permissions instead.
    +	SourceSecurityGroupOwnerId *string `type:"string"`
    +
    +	// The end of port range for the TCP and UDP protocols, or an ICMP code number.
    +	// For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type.
    +	ToPort *int64 `type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s RevokeSecurityGroupIngressInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RevokeSecurityGroupIngressInput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCidrIp sets the CidrIp field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetCidrIp(v string) *RevokeSecurityGroupIngressInput {
    +	s.CidrIp = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetDryRun(v bool) *RevokeSecurityGroupIngressInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetFromPort sets the FromPort field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetFromPort(v int64) *RevokeSecurityGroupIngressInput {
    +	s.FromPort = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetGroupId(v string) *RevokeSecurityGroupIngressInput {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetGroupName(v string) *RevokeSecurityGroupIngressInput {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// SetIpPermissions sets the IpPermissions field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetIpPermissions(v []*IpPermission) *RevokeSecurityGroupIngressInput {
    +	s.IpPermissions = v
    +	return s
    +}
    +
    +// SetIpProtocol sets the IpProtocol field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetIpProtocol(v string) *RevokeSecurityGroupIngressInput {
    +	s.IpProtocol = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetSourceSecurityGroupName(v string) *RevokeSecurityGroupIngressInput {
    +	s.SourceSecurityGroupName = &v
    +	return s
    +}
    +
    +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetSourceSecurityGroupOwnerId(v string) *RevokeSecurityGroupIngressInput {
    +	s.SourceSecurityGroupOwnerId = &v
    +	return s
    +}
    +
    +// SetToPort sets the ToPort field's value.
    +func (s *RevokeSecurityGroupIngressInput) SetToPort(v int64) *RevokeSecurityGroupIngressInput {
    +	s.ToPort = &v
    +	return s
    +}
    +
    +type RevokeSecurityGroupIngressOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s RevokeSecurityGroupIngressOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RevokeSecurityGroupIngressOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Describes a route in a route table.
    +type Route struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR block used for the destination match.
    +	DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"`
    +
    +	// The prefix of the AWS service.
    +	DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"`
    +
    +	// The ID of a gateway attached to your VPC.
    +	GatewayId *string `locationName:"gatewayId" type:"string"`
    +
    +	// The ID of a NAT instance in your VPC.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The AWS account ID of the owner of the instance.
    +	InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"`
    +
    +	// The ID of a NAT gateway.
    +	NatGatewayId *string `locationName:"natGatewayId" type:"string"`
    +
    +	// The ID of the network interface.
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
    +
    +	// Describes how the route was created.
    +	//
    +	//    * CreateRouteTable - The route was automatically created when the route
    +	//    table was created.
    +	//
    +	//    * CreateRoute - The route was manually added to the route table.
    +	//
    +	//    * EnableVgwRoutePropagation - The route was propagated by route propagation.
    +	Origin *string `locationName:"origin" type:"string" enum:"RouteOrigin"`
    +
    +	// The state of the route. The blackhole state indicates that the route's target
    +	// isn't available (for example, the specified gateway isn't attached to the
    +	// VPC, or the specified NAT instance has been terminated).
    +	State *string `locationName:"state" type:"string" enum:"RouteState"`
    +
    +	// The ID of the VPC peering connection.
    +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Route) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Route) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
    +func (s *Route) SetDestinationCidrBlock(v string) *Route {
    +	s.DestinationCidrBlock = &v
    +	return s
    +}
    +
    +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value.
    +func (s *Route) SetDestinationPrefixListId(v string) *Route {
    +	s.DestinationPrefixListId = &v
    +	return s
    +}
    +
    +// SetGatewayId sets the GatewayId field's value.
    +func (s *Route) SetGatewayId(v string) *Route {
    +	s.GatewayId = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *Route) SetInstanceId(v string) *Route {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetInstanceOwnerId sets the InstanceOwnerId field's value.
    +func (s *Route) SetInstanceOwnerId(v string) *Route {
    +	s.InstanceOwnerId = &v
    +	return s
    +}
    +
    +// SetNatGatewayId sets the NatGatewayId field's value.
    +func (s *Route) SetNatGatewayId(v string) *Route {
    +	s.NatGatewayId = &v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *Route) SetNetworkInterfaceId(v string) *Route {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetOrigin sets the Origin field's value.
    +func (s *Route) SetOrigin(v string) *Route {
    +	s.Origin = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Route) SetState(v string) *Route {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *Route) SetVpcPeeringConnectionId(v string) *Route {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
    +// Describes a route table.
    +type RouteTable struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The associations between the route table and one or more subnets.
    +	Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"`
    +
    +	// Any virtual private gateway (VGW) propagating routes.
    +	PropagatingVgws []*PropagatingVgw `locationName:"propagatingVgwSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the route table.
    +	RouteTableId *string `locationName:"routeTableId" type:"string"`
    +
    +	// The routes in the route table.
    +	Routes []*Route `locationName:"routeSet" locationNameList:"item" type:"list"`
    +
    +	// Any tags assigned to the route table.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s RouteTable) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RouteTable) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociations sets the Associations field's value.
    +func (s *RouteTable) SetAssociations(v []*RouteTableAssociation) *RouteTable {
    +	s.Associations = v
    +	return s
    +}
    +
    +// SetPropagatingVgws sets the PropagatingVgws field's value.
    +func (s *RouteTable) SetPropagatingVgws(v []*PropagatingVgw) *RouteTable {
    +	s.PropagatingVgws = v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *RouteTable) SetRouteTableId(v string) *RouteTable {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +// SetRoutes sets the Routes field's value.
    +func (s *RouteTable) SetRoutes(v []*Route) *RouteTable {
    +	s.Routes = v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *RouteTable) SetTags(v []*Tag) *RouteTable {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *RouteTable) SetVpcId(v string) *RouteTable {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes an association between a route table and a subnet.
    +type RouteTableAssociation struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether this is the main route table.
    +	Main *bool `locationName:"main" type:"boolean"`
    +
    +	// The ID of the association between a route table and a subnet.
    +	RouteTableAssociationId *string `locationName:"routeTableAssociationId" type:"string"`
    +
    +	// The ID of the route table.
    +	RouteTableId *string `locationName:"routeTableId" type:"string"`
    +
    +	// The ID of the subnet. A subnet ID is not returned for an implicit association.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s RouteTableAssociation) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RouteTableAssociation) GoString() string {
    +	return s.String()
    +}
    +
    +// SetMain sets the Main field's value.
    +func (s *RouteTableAssociation) SetMain(v bool) *RouteTableAssociation {
    +	s.Main = &v
    +	return s
    +}
    +
    +// SetRouteTableAssociationId sets the RouteTableAssociationId field's value.
    +func (s *RouteTableAssociation) SetRouteTableAssociationId(v string) *RouteTableAssociation {
    +	s.RouteTableAssociationId = &v
    +	return s
    +}
    +
    +// SetRouteTableId sets the RouteTableId field's value.
    +func (s *RouteTableAssociation) SetRouteTableId(v string) *RouteTableAssociation {
    +	s.RouteTableId = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *RouteTableAssociation) SetSubnetId(v string) *RouteTableAssociation {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// Contains the parameters for RunInstances.
    +type RunInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Reserved.
    +	AdditionalInfo *string `locationName:"additionalInfo" type:"string"`
    +
    +	// The block device mapping.
    +	//
    +	// Supplying both a snapshot ID and an encryption value as arguments for block-device
    +	// mapping results in an error. This is because only blank volumes can be encrypted
    +	// on start, and these are not created from a snapshot. If a snapshot is the
    +	// basis for the volume, it contains data by definition and its encryption status
    +	// cannot be changed using this action.
    +	BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
    +
    +	// Unique, case-sensitive identifier you provide to ensure the idempotency of
    +	// the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	//
    +	// Constraints: Maximum 64 ASCII characters
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// If you set this parameter to true, you can't terminate the instance using
    +	// the Amazon EC2 console, CLI, or API; otherwise, you can. If you set this
    +	// parameter to true and then later want to be able to terminate the instance,
    +	// you must first change the value of the disableApiTermination attribute to
    +	// false using ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior
    +	// to terminate, you can terminate the instance by running the shutdown command
    +	// from the instance.
    +	//
    +	// Default: false
    +	DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Indicates whether the instance is optimized for EBS I/O. This optimization
    +	// provides dedicated throughput to Amazon EBS and an optimized configuration
    +	// stack to provide optimal EBS I/O performance. This optimization isn't available
    +	// with all instance types. Additional usage charges apply when using an EBS-optimized
    +	// instance.
    +	//
    +	// Default: false
    +	EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
    +
    +	// The IAM instance profile.
    +	IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
    +
    +	// The ID of the AMI, which you can get by calling DescribeImages.
    +	//
    +	// ImageId is a required field
    +	ImageId *string `type:"string" required:"true"`
    +
    +	// Indicates whether an instance stops or terminates when you initiate shutdown
    +	// from the instance (using the operating system command for system shutdown).
    +	//
    +	// Default: stop
    +	InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"`
    +
    +	// The instance type. For more information, see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	//
    +	// Default: m1.small
    +	InstanceType *string `type:"string" enum:"InstanceType"`
    +
    +	// The ID of the kernel.
    +	//
    +	// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more
    +	// information, see  PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	KernelId *string `type:"string"`
    +
    +	// The name of the key pair. You can create a key pair using CreateKeyPair or
    +	// ImportKeyPair.
    +	//
    +	// If you do not specify a key pair, you can't connect to the instance unless
    +	// you choose an AMI that is configured to allow users another way to log in.
    +	KeyName *string `type:"string"`
    +
    +	// The maximum number of instances to launch. If you specify more instances
    +	// than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches
    +	// the largest possible number of instances above MinCount.
    +	//
    +	// Constraints: Between 1 and the maximum number you're allowed for the specified
    +	// instance type. For more information about the default limits, and how to
    +	// request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2)
    +	// in the Amazon EC2 FAQ.
    +	//
    +	// MaxCount is a required field
    +	MaxCount *int64 `type:"integer" required:"true"`
    +
    +	// The minimum number of instances to launch. If you specify a minimum that
    +	// is more instances than Amazon EC2 can launch in the target Availability Zone,
    +	// Amazon EC2 launches no instances.
    +	//
    +	// Constraints: Between 1 and the maximum number you're allowed for the specified
    +	// instance type. For more information about the default limits, and how to
    +	// request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2)
    +	// in the Amazon EC2 General FAQ.
    +	//
    +	// MinCount is a required field
    +	MinCount *int64 `type:"integer" required:"true"`
    +
    +	// The monitoring for the instance.
    +	Monitoring *RunInstancesMonitoringEnabled `type:"structure"`
    +
    +	// One or more network interfaces.
    +	NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"`
    +
    +	// The placement for the instance.
    +	Placement *Placement `type:"structure"`
    +
    +	// [EC2-VPC] The primary IP address. You must specify a value from the IP address
    +	// range of the subnet.
    +	//
    +	// Only one private IP address can be designated as primary. Therefore, you
    +	// can't specify this parameter if PrivateIpAddresses.n.Primary is set to true
    +	// and PrivateIpAddresses.n.PrivateIpAddress is set to an IP address.
    +	//
    +	// You cannot specify this option if you're launching more than one instance
    +	// in the request.
    +	//
    +	// Default: We select an IP address from the IP address range of the subnet.
    +	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
    +
    +	// The ID of the RAM disk.
    +	//
    +	// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more
    +	// information, see  PV-GRUB (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	RamdiskId *string `type:"string"`
    +
    +	// One or more security group IDs. You can create a security group using CreateSecurityGroup.
    +	//
    +	// Default: Amazon EC2 uses the default security group.
    +	SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
    +
    +	// [EC2-Classic, default VPC] One or more security group names. For a nondefault
    +	// VPC, you must use security group IDs instead.
    +	//
    +	// Default: Amazon EC2 uses the default security group.
    +	SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"`
    +
    +	// [EC2-VPC] The ID of the subnet to launch the instance into.
    +	SubnetId *string `type:"string"`
    +
    +	// The user data to make available to the instance. For more information, see
    +	// Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html)
    +	// (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
    +	// (Windows). If you are using an AWS SDK or command line tool, Base64-encoding
    +	// is performed for you, and you can load the text from a file. Otherwise, you
    +	// must provide Base64-encoded text.
    +	UserData *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s RunInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RunInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RunInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RunInstancesInput"}
    +	if s.ImageId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ImageId"))
    +	}
    +	if s.MaxCount == nil {
    +		invalidParams.Add(request.NewErrParamRequired("MaxCount"))
    +	}
    +	if s.MinCount == nil {
    +		invalidParams.Add(request.NewErrParamRequired("MinCount"))
    +	}
    +	if s.Monitoring != nil {
    +		if err := s.Monitoring.Validate(); err != nil {
    +			invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams))
    +		}
    +	}
    +	if s.NetworkInterfaces != nil {
    +		for i, v := range s.NetworkInterfaces {
    +			if v == nil {
    +				continue
    +			}
    +			if err := v.Validate(); err != nil {
    +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
    +			}
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAdditionalInfo sets the AdditionalInfo field's value.
    +func (s *RunInstancesInput) SetAdditionalInfo(v string) *RunInstancesInput {
    +	s.AdditionalInfo = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *RunInstancesInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *RunInstancesInput {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *RunInstancesInput) SetClientToken(v string) *RunInstancesInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetDisableApiTermination sets the DisableApiTermination field's value.
    +func (s *RunInstancesInput) SetDisableApiTermination(v bool) *RunInstancesInput {
    +	s.DisableApiTermination = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RunInstancesInput) SetDryRun(v bool) *RunInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *RunInstancesInput) SetEbsOptimized(v bool) *RunInstancesInput {
    +	s.EbsOptimized = &v
    +	return s
    +}
    +
    +// SetIamInstanceProfile sets the IamInstanceProfile field's value.
    +func (s *RunInstancesInput) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *RunInstancesInput {
    +	s.IamInstanceProfile = v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *RunInstancesInput) SetImageId(v string) *RunInstancesInput {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value.
    +func (s *RunInstancesInput) SetInstanceInitiatedShutdownBehavior(v string) *RunInstancesInput {
    +	s.InstanceInitiatedShutdownBehavior = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *RunInstancesInput) SetInstanceType(v string) *RunInstancesInput {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *RunInstancesInput) SetKernelId(v string) *RunInstancesInput {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *RunInstancesInput) SetKeyName(v string) *RunInstancesInput {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// SetMaxCount sets the MaxCount field's value.
    +func (s *RunInstancesInput) SetMaxCount(v int64) *RunInstancesInput {
    +	s.MaxCount = &v
    +	return s
    +}
    +
    +// SetMinCount sets the MinCount field's value.
    +func (s *RunInstancesInput) SetMinCount(v int64) *RunInstancesInput {
    +	s.MinCount = &v
    +	return s
    +}
    +
    +// SetMonitoring sets the Monitoring field's value.
    +func (s *RunInstancesInput) SetMonitoring(v *RunInstancesMonitoringEnabled) *RunInstancesInput {
    +	s.Monitoring = v
    +	return s
    +}
    +
    +// SetNetworkInterfaces sets the NetworkInterfaces field's value.
    +func (s *RunInstancesInput) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *RunInstancesInput {
    +	s.NetworkInterfaces = v
    +	return s
    +}
    +
    +// SetPlacement sets the Placement field's value.
    +func (s *RunInstancesInput) SetPlacement(v *Placement) *RunInstancesInput {
    +	s.Placement = v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *RunInstancesInput) SetPrivateIpAddress(v string) *RunInstancesInput {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *RunInstancesInput) SetRamdiskId(v string) *RunInstancesInput {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetSecurityGroupIds sets the SecurityGroupIds field's value.
    +func (s *RunInstancesInput) SetSecurityGroupIds(v []*string) *RunInstancesInput {
    +	s.SecurityGroupIds = v
    +	return s
    +}
    +
    +// SetSecurityGroups sets the SecurityGroups field's value.
    +func (s *RunInstancesInput) SetSecurityGroups(v []*string) *RunInstancesInput {
    +	s.SecurityGroups = v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *RunInstancesInput) SetSubnetId(v string) *RunInstancesInput {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetUserData sets the UserData field's value.
    +func (s *RunInstancesInput) SetUserData(v string) *RunInstancesInput {
    +	s.UserData = &v
    +	return s
    +}
    +
    +// Describes the monitoring for the instance.
    +type RunInstancesMonitoringEnabled struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether monitoring is enabled for the instance.
    +	//
    +	// Enabled is a required field
    +	Enabled *bool `locationName:"enabled" type:"boolean" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s RunInstancesMonitoringEnabled) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RunInstancesMonitoringEnabled) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RunInstancesMonitoringEnabled) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RunInstancesMonitoringEnabled"}
    +	if s.Enabled == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Enabled"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetEnabled sets the Enabled field's value.
    +func (s *RunInstancesMonitoringEnabled) SetEnabled(v bool) *RunInstancesMonitoringEnabled {
    +	s.Enabled = &v
    +	return s
    +}
    +
    +// Contains the parameters for RunScheduledInstances.
    +type RunScheduledInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Unique, case-sensitive identifier that ensures the idempotency of the request.
    +	// For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	ClientToken *string `type:"string" idempotencyToken:"true"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `type:"boolean"`
    +
    +	// The number of instances.
    +	//
    +	// Default: 1
    +	InstanceCount *int64 `type:"integer"`
    +
    +	// The launch specification. You must match the instance type, Availability
    +	// Zone, network, and platform of the schedule that you purchased.
    +	//
    +	// LaunchSpecification is a required field
    +	LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true"`
    +
    +	// The Scheduled Instance ID.
    +	//
    +	// ScheduledInstanceId is a required field
    +	ScheduledInstanceId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s RunScheduledInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RunScheduledInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *RunScheduledInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "RunScheduledInstancesInput"}
    +	if s.LaunchSpecification == nil {
    +		invalidParams.Add(request.NewErrParamRequired("LaunchSpecification"))
    +	}
    +	if s.ScheduledInstanceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ScheduledInstanceId"))
    +	}
    +	if s.LaunchSpecification != nil {
    +		if err := s.LaunchSpecification.Validate(); err != nil {
    +			invalidParams.AddNested("LaunchSpecification", err.(request.ErrInvalidParams))
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *RunScheduledInstancesInput) SetClientToken(v string) *RunScheduledInstancesInput {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *RunScheduledInstancesInput) SetDryRun(v bool) *RunScheduledInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *RunScheduledInstancesInput) SetInstanceCount(v int64) *RunScheduledInstancesInput {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetLaunchSpecification sets the LaunchSpecification field's value.
    +func (s *RunScheduledInstancesInput) SetLaunchSpecification(v *ScheduledInstancesLaunchSpecification) *RunScheduledInstancesInput {
    +	s.LaunchSpecification = v
    +	return s
    +}
    +
    +// SetScheduledInstanceId sets the ScheduledInstanceId field's value.
    +func (s *RunScheduledInstancesInput) SetScheduledInstanceId(v string) *RunScheduledInstancesInput {
    +	s.ScheduledInstanceId = &v
    +	return s
    +}
    +
    +// Contains the output of RunScheduledInstances.
    +type RunScheduledInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The IDs of the newly launched instances.
    +	InstanceIdSet []*string `locationName:"instanceIdSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s RunScheduledInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s RunScheduledInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceIdSet sets the InstanceIdSet field's value.
    +func (s *RunScheduledInstancesOutput) SetInstanceIdSet(v []*string) *RunScheduledInstancesOutput {
    +	s.InstanceIdSet = v
    +	return s
    +}
    +
    +// Describes the storage parameters for S3 and S3 buckets for an instance store-backed
    +// AMI.
    +type S3Storage struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The access key ID of the owner of the bucket. Before you specify a value
    +	// for your access key ID, review and follow the guidance in Best Practices
    +	// for Managing AWS Access Keys (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html).
    +	AWSAccessKeyId *string `type:"string"`
    +
    +	// The bucket in which to store the AMI. You can specify a bucket that you already
    +	// own or a new bucket that Amazon EC2 creates on your behalf. If you specify
    +	// a bucket that belongs to someone else, Amazon EC2 returns an error.
    +	Bucket *string `locationName:"bucket" type:"string"`
    +
    +	// The beginning of the file name of the AMI.
    +	Prefix *string `locationName:"prefix" type:"string"`
    +
    +	// An Amazon S3 upload policy that gives Amazon EC2 permission to upload items
    +	// into Amazon S3 on your behalf.
    +	//
    +	// UploadPolicy is automatically base64 encoded/decoded by the SDK.
    +	UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"`
    +
    +	// The signature of the JSON document.
    +	UploadPolicySignature *string `locationName:"uploadPolicySignature" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s S3Storage) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s S3Storage) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAWSAccessKeyId sets the AWSAccessKeyId field's value.
    +func (s *S3Storage) SetAWSAccessKeyId(v string) *S3Storage {
    +	s.AWSAccessKeyId = &v
    +	return s
    +}
    +
    +// SetBucket sets the Bucket field's value.
    +func (s *S3Storage) SetBucket(v string) *S3Storage {
    +	s.Bucket = &v
    +	return s
    +}
    +
    +// SetPrefix sets the Prefix field's value.
    +func (s *S3Storage) SetPrefix(v string) *S3Storage {
    +	s.Prefix = &v
    +	return s
    +}
    +
    +// SetUploadPolicy sets the UploadPolicy field's value.
    +func (s *S3Storage) SetUploadPolicy(v []byte) *S3Storage {
    +	s.UploadPolicy = v
    +	return s
    +}
    +
    +// SetUploadPolicySignature sets the UploadPolicySignature field's value.
    +func (s *S3Storage) SetUploadPolicySignature(v string) *S3Storage {
    +	s.UploadPolicySignature = &v
    +	return s
    +}
    +
    +// Describes a Scheduled Instance.
    +type ScheduledInstance struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The date when the Scheduled Instance was purchased.
    +	CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The hourly price for a single instance.
    +	HourlyPrice *string `locationName:"hourlyPrice" type:"string"`
    +
    +	// The number of instances.
    +	InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
    +
    +	// The instance type.
    +	InstanceType *string `locationName:"instanceType" type:"string"`
    +
    +	// The network platform (EC2-Classic or EC2-VPC).
    +	NetworkPlatform *string `locationName:"networkPlatform" type:"string"`
    +
    +	// The time for the next schedule to start.
    +	NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The platform (Linux/UNIX or Windows).
    +	Platform *string `locationName:"platform" type:"string"`
    +
    +	// The time that the previous schedule ended or will end.
    +	PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The schedule recurrence.
    +	Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"`
    +
    +	// The Scheduled Instance ID.
    +	ScheduledInstanceId *string `locationName:"scheduledInstanceId" type:"string"`
    +
    +	// The number of hours in the schedule.
    +	SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"`
    +
    +	// The end date for the Scheduled Instance.
    +	TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The start date for the Scheduled Instance.
    +	TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The total number of hours for a single instance for the entire term.
    +	TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstance) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstance) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *ScheduledInstance) SetAvailabilityZone(v string) *ScheduledInstance {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetCreateDate sets the CreateDate field's value.
    +func (s *ScheduledInstance) SetCreateDate(v time.Time) *ScheduledInstance {
    +	s.CreateDate = &v
    +	return s
    +}
    +
    +// SetHourlyPrice sets the HourlyPrice field's value.
    +func (s *ScheduledInstance) SetHourlyPrice(v string) *ScheduledInstance {
    +	s.HourlyPrice = &v
    +	return s
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *ScheduledInstance) SetInstanceCount(v int64) *ScheduledInstance {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ScheduledInstance) SetInstanceType(v string) *ScheduledInstance {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetNetworkPlatform sets the NetworkPlatform field's value.
    +func (s *ScheduledInstance) SetNetworkPlatform(v string) *ScheduledInstance {
    +	s.NetworkPlatform = &v
    +	return s
    +}
    +
    +// SetNextSlotStartTime sets the NextSlotStartTime field's value.
    +func (s *ScheduledInstance) SetNextSlotStartTime(v time.Time) *ScheduledInstance {
    +	s.NextSlotStartTime = &v
    +	return s
    +}
    +
    +// SetPlatform sets the Platform field's value.
    +func (s *ScheduledInstance) SetPlatform(v string) *ScheduledInstance {
    +	s.Platform = &v
    +	return s
    +}
    +
    +// SetPreviousSlotEndTime sets the PreviousSlotEndTime field's value.
    +func (s *ScheduledInstance) SetPreviousSlotEndTime(v time.Time) *ScheduledInstance {
    +	s.PreviousSlotEndTime = &v
    +	return s
    +}
    +
    +// SetRecurrence sets the Recurrence field's value.
    +func (s *ScheduledInstance) SetRecurrence(v *ScheduledInstanceRecurrence) *ScheduledInstance {
    +	s.Recurrence = v
    +	return s
    +}
    +
    +// SetScheduledInstanceId sets the ScheduledInstanceId field's value.
    +func (s *ScheduledInstance) SetScheduledInstanceId(v string) *ScheduledInstance {
    +	s.ScheduledInstanceId = &v
    +	return s
    +}
    +
    +// SetSlotDurationInHours sets the SlotDurationInHours field's value.
    +func (s *ScheduledInstance) SetSlotDurationInHours(v int64) *ScheduledInstance {
    +	s.SlotDurationInHours = &v
    +	return s
    +}
    +
    +// SetTermEndDate sets the TermEndDate field's value.
    +func (s *ScheduledInstance) SetTermEndDate(v time.Time) *ScheduledInstance {
    +	s.TermEndDate = &v
    +	return s
    +}
    +
    +// SetTermStartDate sets the TermStartDate field's value.
    +func (s *ScheduledInstance) SetTermStartDate(v time.Time) *ScheduledInstance {
    +	s.TermStartDate = &v
    +	return s
    +}
    +
    +// SetTotalScheduledInstanceHours sets the TotalScheduledInstanceHours field's value.
    +func (s *ScheduledInstance) SetTotalScheduledInstanceHours(v int64) *ScheduledInstance {
    +	s.TotalScheduledInstanceHours = &v
    +	return s
    +}
    +
    +// Describes a schedule that is available for your Scheduled Instances.
    +type ScheduledInstanceAvailability struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The number of available instances.
    +	AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"`
    +
    +	// The time period for the first schedule to start.
    +	FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The hourly price for a single instance.
    +	HourlyPrice *string `locationName:"hourlyPrice" type:"string"`
    +
    +	// The instance type. You can specify one of the C3, C4, M4, or R3 instance
    +	// types.
    +	InstanceType *string `locationName:"instanceType" type:"string"`
    +
    +	// The maximum term. The only possible value is 365 days.
    +	MaxTermDurationInDays *int64 `locationName:"maxTermDurationInDays" type:"integer"`
    +
    +	// The minimum term. The only possible value is 365 days.
    +	MinTermDurationInDays *int64 `locationName:"minTermDurationInDays" type:"integer"`
    +
    +	// The network platform (EC2-Classic or EC2-VPC).
    +	NetworkPlatform *string `locationName:"networkPlatform" type:"string"`
    +
    +	// The platform (Linux/UNIX or Windows).
    +	Platform *string `locationName:"platform" type:"string"`
    +
    +	// The purchase token. This token expires in two hours.
    +	PurchaseToken *string `locationName:"purchaseToken" type:"string"`
    +
    +	// The schedule recurrence.
    +	Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"`
    +
    +	// The number of hours in the schedule.
    +	SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"`
    +
    +	// The total number of hours for a single instance for the entire term.
    +	TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstanceAvailability) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstanceAvailability) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *ScheduledInstanceAvailability) SetAvailabilityZone(v string) *ScheduledInstanceAvailability {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetAvailableInstanceCount sets the AvailableInstanceCount field's value.
    +func (s *ScheduledInstanceAvailability) SetAvailableInstanceCount(v int64) *ScheduledInstanceAvailability {
    +	s.AvailableInstanceCount = &v
    +	return s
    +}
    +
    +// SetFirstSlotStartTime sets the FirstSlotStartTime field's value.
    +func (s *ScheduledInstanceAvailability) SetFirstSlotStartTime(v time.Time) *ScheduledInstanceAvailability {
    +	s.FirstSlotStartTime = &v
    +	return s
    +}
    +
    +// SetHourlyPrice sets the HourlyPrice field's value.
    +func (s *ScheduledInstanceAvailability) SetHourlyPrice(v string) *ScheduledInstanceAvailability {
    +	s.HourlyPrice = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ScheduledInstanceAvailability) SetInstanceType(v string) *ScheduledInstanceAvailability {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetMaxTermDurationInDays sets the MaxTermDurationInDays field's value.
    +func (s *ScheduledInstanceAvailability) SetMaxTermDurationInDays(v int64) *ScheduledInstanceAvailability {
    +	s.MaxTermDurationInDays = &v
    +	return s
    +}
    +
    +// SetMinTermDurationInDays sets the MinTermDurationInDays field's value.
    +func (s *ScheduledInstanceAvailability) SetMinTermDurationInDays(v int64) *ScheduledInstanceAvailability {
    +	s.MinTermDurationInDays = &v
    +	return s
    +}
    +
    +// SetNetworkPlatform sets the NetworkPlatform field's value.
    +func (s *ScheduledInstanceAvailability) SetNetworkPlatform(v string) *ScheduledInstanceAvailability {
    +	s.NetworkPlatform = &v
    +	return s
    +}
    +
    +// SetPlatform sets the Platform field's value.
    +func (s *ScheduledInstanceAvailability) SetPlatform(v string) *ScheduledInstanceAvailability {
    +	s.Platform = &v
    +	return s
    +}
    +
    +// SetPurchaseToken sets the PurchaseToken field's value.
    +func (s *ScheduledInstanceAvailability) SetPurchaseToken(v string) *ScheduledInstanceAvailability {
    +	s.PurchaseToken = &v
    +	return s
    +}
    +
    +// SetRecurrence sets the Recurrence field's value.
    +func (s *ScheduledInstanceAvailability) SetRecurrence(v *ScheduledInstanceRecurrence) *ScheduledInstanceAvailability {
    +	s.Recurrence = v
    +	return s
    +}
    +
    +// SetSlotDurationInHours sets the SlotDurationInHours field's value.
    +func (s *ScheduledInstanceAvailability) SetSlotDurationInHours(v int64) *ScheduledInstanceAvailability {
    +	s.SlotDurationInHours = &v
    +	return s
    +}
    +
    +// SetTotalScheduledInstanceHours sets the TotalScheduledInstanceHours field's value.
    +func (s *ScheduledInstanceAvailability) SetTotalScheduledInstanceHours(v int64) *ScheduledInstanceAvailability {
    +	s.TotalScheduledInstanceHours = &v
    +	return s
    +}
    +
    +// Describes the recurring schedule for a Scheduled Instance.
    +type ScheduledInstanceRecurrence struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The frequency (Daily, Weekly, or Monthly).
    +	Frequency *string `locationName:"frequency" type:"string"`
    +
    +	// The interval quantity. The interval unit depends on the value of frequency.
    +	// For example, every 2 weeks or every 2 months.
    +	Interval *int64 `locationName:"interval" type:"integer"`
    +
    +	// The days. For a monthly schedule, this is one or more days of the month (1-31).
    +	// For a weekly schedule, this is one or more days of the week (1-7, where 1
    +	// is Sunday).
    +	OccurrenceDaySet []*int64 `locationName:"occurrenceDaySet" locationNameList:"item" type:"list"`
    +
    +	// Indicates whether the occurrence is relative to the end of the specified
    +	// week or month.
    +	OccurrenceRelativeToEnd *bool `locationName:"occurrenceRelativeToEnd" type:"boolean"`
    +
    +	// The unit for occurrenceDaySet (DayOfWeek or DayOfMonth).
    +	OccurrenceUnit *string `locationName:"occurrenceUnit" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstanceRecurrence) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstanceRecurrence) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFrequency sets the Frequency field's value.
    +func (s *ScheduledInstanceRecurrence) SetFrequency(v string) *ScheduledInstanceRecurrence {
    +	s.Frequency = &v
    +	return s
    +}
    +
    +// SetInterval sets the Interval field's value.
    +func (s *ScheduledInstanceRecurrence) SetInterval(v int64) *ScheduledInstanceRecurrence {
    +	s.Interval = &v
    +	return s
    +}
    +
    +// SetOccurrenceDaySet sets the OccurrenceDaySet field's value.
    +func (s *ScheduledInstanceRecurrence) SetOccurrenceDaySet(v []*int64) *ScheduledInstanceRecurrence {
    +	s.OccurrenceDaySet = v
    +	return s
    +}
    +
    +// SetOccurrenceRelativeToEnd sets the OccurrenceRelativeToEnd field's value.
    +func (s *ScheduledInstanceRecurrence) SetOccurrenceRelativeToEnd(v bool) *ScheduledInstanceRecurrence {
    +	s.OccurrenceRelativeToEnd = &v
    +	return s
    +}
    +
    +// SetOccurrenceUnit sets the OccurrenceUnit field's value.
    +func (s *ScheduledInstanceRecurrence) SetOccurrenceUnit(v string) *ScheduledInstanceRecurrence {
    +	s.OccurrenceUnit = &v
    +	return s
    +}
    +
    +// Describes the recurring schedule for a Scheduled Instance.
    +type ScheduledInstanceRecurrenceRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The frequency (Daily, Weekly, or Monthly).
    +	Frequency *string `type:"string"`
    +
    +	// The interval quantity. The interval unit depends on the value of Frequency.
    +	// For example, every 2 weeks or every 2 months.
    +	Interval *int64 `type:"integer"`
    +
    +	// The days. For a monthly schedule, this is one or more days of the month (1-31).
    +	// For a weekly schedule, this is one or more days of the week (1-7, where 1
    +	// is Sunday). You can't specify this value with a daily schedule. If the occurrence
    +	// is relative to the end of the month, you can specify only a single day.
    +	OccurrenceDays []*int64 `locationName:"OccurrenceDay" locationNameList:"OccurenceDay" type:"list"`
    +
    +	// Indicates whether the occurrence is relative to the end of the specified
    +	// week or month. You can't specify this value with a daily schedule.
    +	OccurrenceRelativeToEnd *bool `type:"boolean"`
    +
    +	// The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required
    +	// for a monthly schedule. You can't specify DayOfWeek with a weekly schedule.
    +	// You can't specify this value with a daily schedule.
    +	OccurrenceUnit *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstanceRecurrenceRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstanceRecurrenceRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFrequency sets the Frequency field's value.
    +func (s *ScheduledInstanceRecurrenceRequest) SetFrequency(v string) *ScheduledInstanceRecurrenceRequest {
    +	s.Frequency = &v
    +	return s
    +}
    +
    +// SetInterval sets the Interval field's value.
    +func (s *ScheduledInstanceRecurrenceRequest) SetInterval(v int64) *ScheduledInstanceRecurrenceRequest {
    +	s.Interval = &v
    +	return s
    +}
    +
    +// SetOccurrenceDays sets the OccurrenceDays field's value.
    +func (s *ScheduledInstanceRecurrenceRequest) SetOccurrenceDays(v []*int64) *ScheduledInstanceRecurrenceRequest {
    +	s.OccurrenceDays = v
    +	return s
    +}
    +
    +// SetOccurrenceRelativeToEnd sets the OccurrenceRelativeToEnd field's value.
    +func (s *ScheduledInstanceRecurrenceRequest) SetOccurrenceRelativeToEnd(v bool) *ScheduledInstanceRecurrenceRequest {
    +	s.OccurrenceRelativeToEnd = &v
    +	return s
    +}
    +
    +// SetOccurrenceUnit sets the OccurrenceUnit field's value.
    +func (s *ScheduledInstanceRecurrenceRequest) SetOccurrenceUnit(v string) *ScheduledInstanceRecurrenceRequest {
    +	s.OccurrenceUnit = &v
    +	return s
    +}
    +
    +// Describes a block device mapping for a Scheduled Instance.
    +type ScheduledInstancesBlockDeviceMapping struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The device name exposed to the instance (for example, /dev/sdh or xvdh).
    +	DeviceName *string `type:"string"`
    +
    +	// Parameters used to set up EBS volumes automatically when the instance is
    +	// launched.
    +	Ebs *ScheduledInstancesEbs `type:"structure"`
    +
    +	// Suppresses the specified device included in the block device mapping of the
    +	// AMI.
    +	NoDevice *string `type:"string"`
    +
    +	// The virtual device name (ephemeralN). Instance store volumes are numbered
    +	// starting from 0. An instance type with two available instance store volumes
    +	// can specify mappings for ephemeral0 and ephemeral1. The number of available
    +	// instance store volumes depends on the instance type. After you connect to
    +	// the instance, you must mount the volume.
    +	//
    +	// Constraints: For M3 instances, you must specify instance store volumes in
    +	// the block device mapping for the instance. When you launch an M3 instance,
    +	// we ignore any instance store volumes specified in the block device mapping
    +	// for the AMI.
    +	VirtualName *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesBlockDeviceMapping) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesBlockDeviceMapping) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDeviceName sets the DeviceName field's value.
    +func (s *ScheduledInstancesBlockDeviceMapping) SetDeviceName(v string) *ScheduledInstancesBlockDeviceMapping {
    +	s.DeviceName = &v
    +	return s
    +}
    +
    +// SetEbs sets the Ebs field's value.
    +func (s *ScheduledInstancesBlockDeviceMapping) SetEbs(v *ScheduledInstancesEbs) *ScheduledInstancesBlockDeviceMapping {
    +	s.Ebs = v
    +	return s
    +}
    +
    +// SetNoDevice sets the NoDevice field's value.
    +func (s *ScheduledInstancesBlockDeviceMapping) SetNoDevice(v string) *ScheduledInstancesBlockDeviceMapping {
    +	s.NoDevice = &v
    +	return s
    +}
    +
    +// SetVirtualName sets the VirtualName field's value.
    +func (s *ScheduledInstancesBlockDeviceMapping) SetVirtualName(v string) *ScheduledInstancesBlockDeviceMapping {
    +	s.VirtualName = &v
    +	return s
    +}
    +
    +// Describes an EBS volume for a Scheduled Instance.
    +type ScheduledInstancesEbs struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the volume is deleted on instance termination.
    +	DeleteOnTermination *bool `type:"boolean"`
    +
    +	// Indicates whether the volume is encrypted. You can attach encrypted volumes
    +	// only to instances that support them.
    +	Encrypted *bool `type:"boolean"`
    +
    +	// The number of I/O operations per second (IOPS) that the volume supports.
    +	// For io1 volumes, this represents the number of IOPS that are provisioned
    +	// for the volume. For gp2 volumes, this represents the baseline performance
    +	// of the volume and the rate at which the volume accumulates I/O credits for
    +	// bursting. For more information about gp2 baseline performance, I/O credits,
    +	// and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	//
    +	// Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for
    +	// gp2 volumes.
    +	//
    +	// Condition: This parameter is required for requests to create io1 volumes;
    +	// it is not used in requests to create gp2, st1, sc1, or standard volumes.
    +	Iops *int64 `type:"integer"`
    +
    +	// The ID of the snapshot.
    +	SnapshotId *string `type:"string"`
    +
    +	// The size of the volume, in GiB.
    +	//
    +	// Default: If you're creating the volume from a snapshot and don't specify
    +	// a volume size, the default is the snapshot size.
    +	VolumeSize *int64 `type:"integer"`
    +
    +	// The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD,
    +	// st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.
    +	//
    +	// Default: standard
    +	VolumeType *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesEbs) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesEbs) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDeleteOnTermination sets the DeleteOnTermination field's value.
    +func (s *ScheduledInstancesEbs) SetDeleteOnTermination(v bool) *ScheduledInstancesEbs {
    +	s.DeleteOnTermination = &v
    +	return s
    +}
    +
    +// SetEncrypted sets the Encrypted field's value.
    +func (s *ScheduledInstancesEbs) SetEncrypted(v bool) *ScheduledInstancesEbs {
    +	s.Encrypted = &v
    +	return s
    +}
    +
    +// SetIops sets the Iops field's value.
    +func (s *ScheduledInstancesEbs) SetIops(v int64) *ScheduledInstancesEbs {
    +	s.Iops = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *ScheduledInstancesEbs) SetSnapshotId(v string) *ScheduledInstancesEbs {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// SetVolumeSize sets the VolumeSize field's value.
    +func (s *ScheduledInstancesEbs) SetVolumeSize(v int64) *ScheduledInstancesEbs {
    +	s.VolumeSize = &v
    +	return s
    +}
    +
    +// SetVolumeType sets the VolumeType field's value.
    +func (s *ScheduledInstancesEbs) SetVolumeType(v string) *ScheduledInstancesEbs {
    +	s.VolumeType = &v
    +	return s
    +}
    +
    +// Describes an IAM instance profile for a Scheduled Instance.
    +type ScheduledInstancesIamInstanceProfile struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Amazon Resource Name (ARN).
    +	Arn *string `type:"string"`
    +
    +	// The name.
    +	Name *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesIamInstanceProfile) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesIamInstanceProfile) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArn sets the Arn field's value.
    +func (s *ScheduledInstancesIamInstanceProfile) SetArn(v string) *ScheduledInstancesIamInstanceProfile {
    +	s.Arn = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *ScheduledInstancesIamInstanceProfile) SetName(v string) *ScheduledInstancesIamInstanceProfile {
    +	s.Name = &v
    +	return s
    +}
    +
    +// Describes the launch specification for a Scheduled Instance.
    +//
    +// If you are launching the Scheduled Instance in EC2-VPC, you must specify
    +// the ID of the subnet. You can specify the subnet using either SubnetId or
    +// NetworkInterface.
    +type ScheduledInstancesLaunchSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// One or more block device mapping entries.
    +	BlockDeviceMappings []*ScheduledInstancesBlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
    +
    +	// Indicates whether the instances are optimized for EBS I/O. This optimization
    +	// provides dedicated throughput to Amazon EBS and an optimized configuration
    +	// stack to provide optimal EBS I/O performance. This optimization isn't available
    +	// with all instance types. Additional usage charges apply when using an EBS-optimized
    +	// instance.
    +	//
    +	// Default: false
    +	EbsOptimized *bool `type:"boolean"`
    +
    +	// The IAM instance profile.
    +	IamInstanceProfile *ScheduledInstancesIamInstanceProfile `type:"structure"`
    +
    +	// The ID of the Amazon Machine Image (AMI).
    +	//
    +	// ImageId is a required field
    +	ImageId *string `type:"string" required:"true"`
    +
    +	// The instance type.
    +	InstanceType *string `type:"string"`
    +
    +	// The ID of the kernel.
    +	KernelId *string `type:"string"`
    +
    +	// The name of the key pair.
    +	KeyName *string `type:"string"`
    +
    +	// Enable or disable monitoring for the instances.
    +	Monitoring *ScheduledInstancesMonitoring `type:"structure"`
    +
    +	// One or more network interfaces.
    +	NetworkInterfaces []*ScheduledInstancesNetworkInterface `locationName:"NetworkInterface" locationNameList:"NetworkInterface" type:"list"`
    +
    +	// The placement information.
    +	Placement *ScheduledInstancesPlacement `type:"structure"`
    +
    +	// The ID of the RAM disk.
    +	RamdiskId *string `type:"string"`
    +
    +	// The IDs of one or more security groups.
    +	SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
    +
    +	// The ID of the subnet in which to launch the instances.
    +	SubnetId *string `type:"string"`
    +
    +	// The base64-encoded MIME user data.
    +	UserData *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesLaunchSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesLaunchSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *ScheduledInstancesLaunchSpecification) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "ScheduledInstancesLaunchSpecification"}
    +	if s.ImageId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("ImageId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetBlockDeviceMappings(v []*ScheduledInstancesBlockDeviceMapping) *ScheduledInstancesLaunchSpecification {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetEbsOptimized(v bool) *ScheduledInstancesLaunchSpecification {
    +	s.EbsOptimized = &v
    +	return s
    +}
    +
    +// SetIamInstanceProfile sets the IamInstanceProfile field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetIamInstanceProfile(v *ScheduledInstancesIamInstanceProfile) *ScheduledInstancesLaunchSpecification {
    +	s.IamInstanceProfile = v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetImageId(v string) *ScheduledInstancesLaunchSpecification {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetInstanceType(v string) *ScheduledInstancesLaunchSpecification {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetKernelId(v string) *ScheduledInstancesLaunchSpecification {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetKeyName(v string) *ScheduledInstancesLaunchSpecification {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// SetMonitoring sets the Monitoring field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetMonitoring(v *ScheduledInstancesMonitoring) *ScheduledInstancesLaunchSpecification {
    +	s.Monitoring = v
    +	return s
    +}
    +
    +// SetNetworkInterfaces sets the NetworkInterfaces field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetNetworkInterfaces(v []*ScheduledInstancesNetworkInterface) *ScheduledInstancesLaunchSpecification {
    +	s.NetworkInterfaces = v
    +	return s
    +}
    +
    +// SetPlacement sets the Placement field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetPlacement(v *ScheduledInstancesPlacement) *ScheduledInstancesLaunchSpecification {
    +	s.Placement = v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetRamdiskId(v string) *ScheduledInstancesLaunchSpecification {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetSecurityGroupIds sets the SecurityGroupIds field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetSecurityGroupIds(v []*string) *ScheduledInstancesLaunchSpecification {
    +	s.SecurityGroupIds = v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetSubnetId(v string) *ScheduledInstancesLaunchSpecification {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetUserData sets the UserData field's value.
    +func (s *ScheduledInstancesLaunchSpecification) SetUserData(v string) *ScheduledInstancesLaunchSpecification {
    +	s.UserData = &v
    +	return s
    +}
    +
    +// Describes whether monitoring is enabled for a Scheduled Instance.
    +type ScheduledInstancesMonitoring struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether monitoring is enabled.
    +	Enabled *bool `type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesMonitoring) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesMonitoring) GoString() string {
    +	return s.String()
    +}
    +
    +// SetEnabled sets the Enabled field's value.
    +func (s *ScheduledInstancesMonitoring) SetEnabled(v bool) *ScheduledInstancesMonitoring {
    +	s.Enabled = &v
    +	return s
    +}
    +
    +// Describes a network interface for a Scheduled Instance.
    +type ScheduledInstancesNetworkInterface struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether to assign a public IP address to instances launched in
    +	// a VPC. The public IP address can only be assigned to a network interface
    +	// for eth0, and can only be assigned to a new network interface, not an existing
    +	// one. You cannot specify more than one network interface in the request. If
    +	// launching into a default subnet, the default value is true.
    +	AssociatePublicIpAddress *bool `type:"boolean"`
    +
    +	// Indicates whether to delete the interface when the instance is terminated.
    +	DeleteOnTermination *bool `type:"boolean"`
    +
    +	// The description.
    +	Description *string `type:"string"`
    +
    +	// The index of the device for the network interface attachment.
    +	DeviceIndex *int64 `type:"integer"`
    +
    +	// The IDs of one or more security groups.
    +	Groups []*string `locationName:"Group" locationNameList:"SecurityGroupId" type:"list"`
    +
    +	// The ID of the network interface.
    +	NetworkInterfaceId *string `type:"string"`
    +
    +	// The IP address of the network interface within the subnet.
    +	PrivateIpAddress *string `type:"string"`
    +
    +	// The private IP addresses.
    +	PrivateIpAddressConfigs []*ScheduledInstancesPrivateIpAddressConfig `locationName:"PrivateIpAddressConfig" locationNameList:"PrivateIpAddressConfigSet" type:"list"`
    +
    +	// The number of secondary private IP addresses.
    +	SecondaryPrivateIpAddressCount *int64 `type:"integer"`
    +
    +	// The ID of the subnet.
    +	SubnetId *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesNetworkInterface) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesNetworkInterface) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetAssociatePublicIpAddress(v bool) *ScheduledInstancesNetworkInterface {
    +	s.AssociatePublicIpAddress = &v
    +	return s
    +}
    +
    +// SetDeleteOnTermination sets the DeleteOnTermination field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetDeleteOnTermination(v bool) *ScheduledInstancesNetworkInterface {
    +	s.DeleteOnTermination = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetDescription(v string) *ScheduledInstancesNetworkInterface {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDeviceIndex sets the DeviceIndex field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetDeviceIndex(v int64) *ScheduledInstancesNetworkInterface {
    +	s.DeviceIndex = &v
    +	return s
    +}
    +
    +// SetGroups sets the Groups field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetGroups(v []*string) *ScheduledInstancesNetworkInterface {
    +	s.Groups = v
    +	return s
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetNetworkInterfaceId(v string) *ScheduledInstancesNetworkInterface {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetPrivateIpAddress(v string) *ScheduledInstancesNetworkInterface {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddressConfigs sets the PrivateIpAddressConfigs field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetPrivateIpAddressConfigs(v []*ScheduledInstancesPrivateIpAddressConfig) *ScheduledInstancesNetworkInterface {
    +	s.PrivateIpAddressConfigs = v
    +	return s
    +}
    +
    +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetSecondaryPrivateIpAddressCount(v int64) *ScheduledInstancesNetworkInterface {
    +	s.SecondaryPrivateIpAddressCount = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *ScheduledInstancesNetworkInterface) SetSubnetId(v string) *ScheduledInstancesNetworkInterface {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// Describes the placement for a Scheduled Instance.
    +type ScheduledInstancesPlacement struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone.
    +	AvailabilityZone *string `type:"string"`
    +
    +	// The name of the placement group.
    +	GroupName *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesPlacement) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesPlacement) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *ScheduledInstancesPlacement) SetAvailabilityZone(v string) *ScheduledInstancesPlacement {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *ScheduledInstancesPlacement) SetGroupName(v string) *ScheduledInstancesPlacement {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// Describes a private IP address for a Scheduled Instance.
    +type ScheduledInstancesPrivateIpAddressConfig struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether this is a primary IP address. Otherwise, this is a secondary
    +	// IP address.
    +	Primary *bool `type:"boolean"`
    +
    +	// The IP address.
    +	PrivateIpAddress *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s ScheduledInstancesPrivateIpAddressConfig) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s ScheduledInstancesPrivateIpAddressConfig) GoString() string {
    +	return s.String()
    +}
    +
    +// SetPrimary sets the Primary field's value.
    +func (s *ScheduledInstancesPrivateIpAddressConfig) SetPrimary(v bool) *ScheduledInstancesPrivateIpAddressConfig {
    +	s.Primary = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddress sets the PrivateIpAddress field's value.
    +func (s *ScheduledInstancesPrivateIpAddressConfig) SetPrivateIpAddress(v string) *ScheduledInstancesPrivateIpAddressConfig {
    +	s.PrivateIpAddress = &v
    +	return s
    +}
    +
    +// Describes a security group
    +type SecurityGroup struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A description of the security group.
    +	Description *string `locationName:"groupDescription" type:"string"`
    +
    +	// The ID of the security group.
    +	GroupId *string `locationName:"groupId" type:"string"`
    +
    +	// The name of the security group.
    +	GroupName *string `locationName:"groupName" type:"string"`
    +
    +	// One or more inbound rules associated with the security group.
    +	IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"`
    +
    +	// [EC2-VPC] One or more outbound rules associated with the security group.
    +	IpPermissionsEgress []*IpPermission `locationName:"ipPermissionsEgress" locationNameList:"item" type:"list"`
    +
    +	// The AWS account ID of the owner of the security group.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// Any tags assigned to the security group.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// [EC2-VPC] The ID of the VPC for the security group.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s SecurityGroup) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SecurityGroup) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *SecurityGroup) SetDescription(v string) *SecurityGroup {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *SecurityGroup) SetGroupId(v string) *SecurityGroup {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *SecurityGroup) SetGroupName(v string) *SecurityGroup {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// SetIpPermissions sets the IpPermissions field's value.
    +func (s *SecurityGroup) SetIpPermissions(v []*IpPermission) *SecurityGroup {
    +	s.IpPermissions = v
    +	return s
    +}
    +
    +// SetIpPermissionsEgress sets the IpPermissionsEgress field's value.
    +func (s *SecurityGroup) SetIpPermissionsEgress(v []*IpPermission) *SecurityGroup {
    +	s.IpPermissionsEgress = v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *SecurityGroup) SetOwnerId(v string) *SecurityGroup {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *SecurityGroup) SetTags(v []*Tag) *SecurityGroup {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *SecurityGroup) SetVpcId(v string) *SecurityGroup {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a VPC with a security group that references your security group.
    +type SecurityGroupReference struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of your security group.
    +	//
    +	// GroupId is a required field
    +	GroupId *string `locationName:"groupId" type:"string" required:"true"`
    +
    +	// The ID of the VPC with the referencing security group.
    +	//
    +	// ReferencingVpcId is a required field
    +	ReferencingVpcId *string `locationName:"referencingVpcId" type:"string" required:"true"`
    +
    +	// The ID of the VPC peering connection.
    +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s SecurityGroupReference) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SecurityGroupReference) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *SecurityGroupReference) SetGroupId(v string) *SecurityGroupReference {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetReferencingVpcId sets the ReferencingVpcId field's value.
    +func (s *SecurityGroupReference) SetReferencingVpcId(v string) *SecurityGroupReference {
    +	s.ReferencingVpcId = &v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGroupReference {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
    +// Describes the time period for a Scheduled Instance to start its first schedule.
    +// The time period must span less than one day.
    +type SlotDateTimeRangeRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The earliest date and time, in UTC, for the Scheduled Instance to start.
    +	//
    +	// EarliestTime is a required field
    +	EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
    +
    +	// The latest date and time, in UTC, for the Scheduled Instance to start. This
    +	// value must be later than or equal to the earliest date and at most three
    +	// months in the future.
    +	//
    +	// LatestTime is a required field
    +	LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s SlotDateTimeRangeRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SlotDateTimeRangeRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *SlotDateTimeRangeRequest) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "SlotDateTimeRangeRequest"}
    +	if s.EarliestTime == nil {
    +		invalidParams.Add(request.NewErrParamRequired("EarliestTime"))
    +	}
    +	if s.LatestTime == nil {
    +		invalidParams.Add(request.NewErrParamRequired("LatestTime"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetEarliestTime sets the EarliestTime field's value.
    +func (s *SlotDateTimeRangeRequest) SetEarliestTime(v time.Time) *SlotDateTimeRangeRequest {
    +	s.EarliestTime = &v
    +	return s
    +}
    +
    +// SetLatestTime sets the LatestTime field's value.
    +func (s *SlotDateTimeRangeRequest) SetLatestTime(v time.Time) *SlotDateTimeRangeRequest {
    +	s.LatestTime = &v
    +	return s
    +}
    +
    +// Describes the time period for a Scheduled Instance to start its first schedule.
    +type SlotStartTimeRangeRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The earliest date and time, in UTC, for the Scheduled Instance to start.
    +	EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The latest date and time, in UTC, for the Scheduled Instance to start.
    +	LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s SlotStartTimeRangeRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SlotStartTimeRangeRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// SetEarliestTime sets the EarliestTime field's value.
    +func (s *SlotStartTimeRangeRequest) SetEarliestTime(v time.Time) *SlotStartTimeRangeRequest {
    +	s.EarliestTime = &v
    +	return s
    +}
    +
    +// SetLatestTime sets the LatestTime field's value.
    +func (s *SlotStartTimeRangeRequest) SetLatestTime(v time.Time) *SlotStartTimeRangeRequest {
    +	s.LatestTime = &v
    +	return s
    +}
    +
    +// Describes a snapshot.
    +type Snapshot struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The data encryption key identifier for the snapshot. This value is a unique
    +	// identifier that corresponds to the data encryption key that was used to encrypt
    +	// the original volume or snapshot copy. Because data encryption keys are inherited
    +	// by volumes created from snapshots, and vice versa, if snapshots share the
    +	// same data encryption key identifier, then they belong to the same volume/snapshot
    +	// lineage. This parameter is only returned by the DescribeSnapshots API operation.
    +	DataEncryptionKeyId *string `locationName:"dataEncryptionKeyId" type:"string"`
    +
    +	// The description for the snapshot.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// Indicates whether the snapshot is encrypted.
    +	Encrypted *bool `locationName:"encrypted" type:"boolean"`
    +
    +	// The full ARN of the AWS Key Management Service (AWS KMS) customer master
    +	// key (CMK) that was used to protect the volume encryption key for the parent
    +	// volume.
    +	KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
    +
    +	// Value from an Amazon-maintained list (amazon | aws-marketplace | microsoft)
    +	// of snapshot owners. Not to be confused with the user-configured AWS account
    +	// alias, which is set from the IAM console.
    +	OwnerAlias *string `locationName:"ownerAlias" type:"string"`
    +
    +	// The AWS account ID of the EBS snapshot owner.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// The progress of the snapshot, as a percentage.
    +	Progress *string `locationName:"progress" type:"string"`
    +
    +	// The ID of the snapshot. Each snapshot receives a unique identifier when it
    +	// is created.
    +	SnapshotId *string `locationName:"snapshotId" type:"string"`
    +
    +	// The time stamp when the snapshot was initiated.
    +	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The snapshot state.
    +	State *string `locationName:"status" type:"string" enum:"SnapshotState"`
    +
    +	// Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy
    +	// operation fails (for example, if the proper AWS Key Management Service (AWS
    +	// KMS) permissions are not obtained) this field displays error state details
    +	// to help you diagnose why the error occurred. This parameter is only returned
    +	// by the DescribeSnapshots API operation.
    +	StateMessage *string `locationName:"statusMessage" type:"string"`
    +
    +	// Any tags assigned to the snapshot.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the volume that was used to create the snapshot. Snapshots created
    +	// by the CopySnapshot action have an arbitrary volume ID that should not be
    +	// used for any purpose.
    +	VolumeId *string `locationName:"volumeId" type:"string"`
    +
    +	// The size of the volume, in GiB.
    +	VolumeSize *int64 `locationName:"volumeSize" type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s Snapshot) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Snapshot) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDataEncryptionKeyId sets the DataEncryptionKeyId field's value.
    +func (s *Snapshot) SetDataEncryptionKeyId(v string) *Snapshot {
    +	s.DataEncryptionKeyId = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *Snapshot) SetDescription(v string) *Snapshot {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetEncrypted sets the Encrypted field's value.
    +func (s *Snapshot) SetEncrypted(v bool) *Snapshot {
    +	s.Encrypted = &v
    +	return s
    +}
    +
    +// SetKmsKeyId sets the KmsKeyId field's value.
    +func (s *Snapshot) SetKmsKeyId(v string) *Snapshot {
    +	s.KmsKeyId = &v
    +	return s
    +}
    +
    +// SetOwnerAlias sets the OwnerAlias field's value.
    +func (s *Snapshot) SetOwnerAlias(v string) *Snapshot {
    +	s.OwnerAlias = &v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *Snapshot) SetOwnerId(v string) *Snapshot {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetProgress sets the Progress field's value.
    +func (s *Snapshot) SetProgress(v string) *Snapshot {
    +	s.Progress = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *Snapshot) SetSnapshotId(v string) *Snapshot {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// SetStartTime sets the StartTime field's value.
    +func (s *Snapshot) SetStartTime(v time.Time) *Snapshot {
    +	s.StartTime = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Snapshot) SetState(v string) *Snapshot {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetStateMessage sets the StateMessage field's value.
    +func (s *Snapshot) SetStateMessage(v string) *Snapshot {
    +	s.StateMessage = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *Snapshot) SetTags(v []*Tag) *Snapshot {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *Snapshot) SetVolumeId(v string) *Snapshot {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// SetVolumeSize sets the VolumeSize field's value.
    +func (s *Snapshot) SetVolumeSize(v int64) *Snapshot {
    +	s.VolumeSize = &v
    +	return s
    +}
    +
    +// Describes the snapshot created from the imported disk.
    +type SnapshotDetail struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A description for the snapshot.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The block device mapping for the snapshot.
    +	DeviceName *string `locationName:"deviceName" type:"string"`
    +
    +	// The size of the disk in the snapshot, in GiB.
    +	DiskImageSize *float64 `locationName:"diskImageSize" type:"double"`
    +
    +	// The format of the disk image from which the snapshot is created.
    +	Format *string `locationName:"format" type:"string"`
    +
    +	// The percentage of progress for the task.
    +	Progress *string `locationName:"progress" type:"string"`
    +
    +	// The snapshot ID of the disk being imported.
    +	SnapshotId *string `locationName:"snapshotId" type:"string"`
    +
    +	// A brief status of the snapshot creation.
    +	Status *string `locationName:"status" type:"string"`
    +
    +	// A detailed status message for the snapshot creation.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +
    +	// The URL used to access the disk image.
    +	Url *string `locationName:"url" type:"string"`
    +
    +	// The S3 bucket for the disk image.
    +	UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s SnapshotDetail) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SnapshotDetail) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *SnapshotDetail) SetDescription(v string) *SnapshotDetail {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDeviceName sets the DeviceName field's value.
    +func (s *SnapshotDetail) SetDeviceName(v string) *SnapshotDetail {
    +	s.DeviceName = &v
    +	return s
    +}
    +
    +// SetDiskImageSize sets the DiskImageSize field's value.
    +func (s *SnapshotDetail) SetDiskImageSize(v float64) *SnapshotDetail {
    +	s.DiskImageSize = &v
    +	return s
    +}
    +
    +// SetFormat sets the Format field's value.
    +func (s *SnapshotDetail) SetFormat(v string) *SnapshotDetail {
    +	s.Format = &v
    +	return s
    +}
    +
    +// SetProgress sets the Progress field's value.
    +func (s *SnapshotDetail) SetProgress(v string) *SnapshotDetail {
    +	s.Progress = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *SnapshotDetail) SetSnapshotId(v string) *SnapshotDetail {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *SnapshotDetail) SetStatus(v string) *SnapshotDetail {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetStatusMessage sets the StatusMessage field's value.
    +func (s *SnapshotDetail) SetStatusMessage(v string) *SnapshotDetail {
    +	s.StatusMessage = &v
    +	return s
    +}
    +
    +// SetUrl sets the Url field's value.
    +func (s *SnapshotDetail) SetUrl(v string) *SnapshotDetail {
    +	s.Url = &v
    +	return s
    +}
    +
    +// SetUserBucket sets the UserBucket field's value.
    +func (s *SnapshotDetail) SetUserBucket(v *UserBucketDetails) *SnapshotDetail {
    +	s.UserBucket = v
    +	return s
    +}
    +
    +// The disk container object for the import snapshot request.
    +type SnapshotDiskContainer struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The description of the disk image being imported.
    +	Description *string `type:"string"`
    +
    +	// The format of the disk image being imported.
    +	//
    +	// Valid values: RAW | VHD | VMDK | OVA
    +	Format *string `type:"string"`
    +
    +	// The URL to the Amazon S3-based disk image being imported. It can either be
    +	// a https URL (https://..) or an Amazon S3 URL (s3://..).
    +	Url *string `type:"string"`
    +
    +	// The S3 bucket for the disk image.
    +	UserBucket *UserBucket `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s SnapshotDiskContainer) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SnapshotDiskContainer) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *SnapshotDiskContainer) SetDescription(v string) *SnapshotDiskContainer {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetFormat sets the Format field's value.
    +func (s *SnapshotDiskContainer) SetFormat(v string) *SnapshotDiskContainer {
    +	s.Format = &v
    +	return s
    +}
    +
    +// SetUrl sets the Url field's value.
    +func (s *SnapshotDiskContainer) SetUrl(v string) *SnapshotDiskContainer {
    +	s.Url = &v
    +	return s
    +}
    +
    +// SetUserBucket sets the UserBucket field's value.
    +func (s *SnapshotDiskContainer) SetUserBucket(v *UserBucket) *SnapshotDiskContainer {
    +	s.UserBucket = v
    +	return s
    +}
    +
    +// Details about the import snapshot task.
    +type SnapshotTaskDetail struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The description of the snapshot.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The size of the disk in the snapshot, in GiB.
    +	DiskImageSize *float64 `locationName:"diskImageSize" type:"double"`
    +
    +	// The format of the disk image from which the snapshot is created.
    +	Format *string `locationName:"format" type:"string"`
    +
    +	// The percentage of completion for the import snapshot task.
    +	Progress *string `locationName:"progress" type:"string"`
    +
    +	// The snapshot ID of the disk being imported.
    +	SnapshotId *string `locationName:"snapshotId" type:"string"`
    +
    +	// A brief status for the import snapshot task.
    +	Status *string `locationName:"status" type:"string"`
    +
    +	// A detailed status message for the import snapshot task.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +
    +	// The URL of the disk image from which the snapshot is created.
    +	Url *string `locationName:"url" type:"string"`
    +
    +	// The S3 bucket for the disk image.
    +	UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s SnapshotTaskDetail) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SnapshotTaskDetail) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *SnapshotTaskDetail) SetDescription(v string) *SnapshotTaskDetail {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetDiskImageSize sets the DiskImageSize field's value.
    +func (s *SnapshotTaskDetail) SetDiskImageSize(v float64) *SnapshotTaskDetail {
    +	s.DiskImageSize = &v
    +	return s
    +}
    +
    +// SetFormat sets the Format field's value.
    +func (s *SnapshotTaskDetail) SetFormat(v string) *SnapshotTaskDetail {
    +	s.Format = &v
    +	return s
    +}
    +
    +// SetProgress sets the Progress field's value.
    +func (s *SnapshotTaskDetail) SetProgress(v string) *SnapshotTaskDetail {
    +	s.Progress = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *SnapshotTaskDetail) SetSnapshotId(v string) *SnapshotTaskDetail {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *SnapshotTaskDetail) SetStatus(v string) *SnapshotTaskDetail {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetStatusMessage sets the StatusMessage field's value.
    +func (s *SnapshotTaskDetail) SetStatusMessage(v string) *SnapshotTaskDetail {
    +	s.StatusMessage = &v
    +	return s
    +}
    +
    +// SetUrl sets the Url field's value.
    +func (s *SnapshotTaskDetail) SetUrl(v string) *SnapshotTaskDetail {
    +	s.Url = &v
    +	return s
    +}
    +
    +// SetUserBucket sets the UserBucket field's value.
    +func (s *SnapshotTaskDetail) SetUserBucket(v *UserBucketDetails) *SnapshotTaskDetail {
    +	s.UserBucket = v
    +	return s
    +}
    +
    +// Describes the data feed for a Spot instance.
    +type SpotDatafeedSubscription struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Amazon S3 bucket where the Spot instance data feed is located.
    +	Bucket *string `locationName:"bucket" type:"string"`
    +
    +	// The fault codes for the Spot instance request, if any.
    +	Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"`
    +
    +	// The AWS account ID of the account.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// The prefix that is prepended to data feed files.
    +	Prefix *string `locationName:"prefix" type:"string"`
    +
    +	// The state of the Spot instance data feed subscription.
    +	State *string `locationName:"state" type:"string" enum:"DatafeedSubscriptionState"`
    +}
    +
    +// String returns the string representation
    +func (s SpotDatafeedSubscription) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotDatafeedSubscription) GoString() string {
    +	return s.String()
    +}
    +
    +// SetBucket sets the Bucket field's value.
    +func (s *SpotDatafeedSubscription) SetBucket(v string) *SpotDatafeedSubscription {
    +	s.Bucket = &v
    +	return s
    +}
    +
    +// SetFault sets the Fault field's value.
    +func (s *SpotDatafeedSubscription) SetFault(v *SpotInstanceStateFault) *SpotDatafeedSubscription {
    +	s.Fault = v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *SpotDatafeedSubscription) SetOwnerId(v string) *SpotDatafeedSubscription {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetPrefix sets the Prefix field's value.
    +func (s *SpotDatafeedSubscription) SetPrefix(v string) *SpotDatafeedSubscription {
    +	s.Prefix = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *SpotDatafeedSubscription) SetState(v string) *SpotDatafeedSubscription {
    +	s.State = &v
    +	return s
    +}
    +
    +// Describes the launch specification for one or more Spot instances.
    +type SpotFleetLaunchSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Deprecated.
    +	AddressingType *string `locationName:"addressingType" type:"string"`
    +
    +	// One or more block device mapping entries.
    +	BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"`
    +
    +	// Indicates whether the instances are optimized for EBS I/O. This optimization
    +	// provides dedicated throughput to Amazon EBS and an optimized configuration
    +	// stack to provide optimal EBS I/O performance. This optimization isn't available
    +	// with all instance types. Additional usage charges apply when using an EBS
    +	// Optimized instance.
    +	//
    +	// Default: false
    +	EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
    +
    +	// The IAM instance profile.
    +	IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`
    +
    +	// The ID of the AMI.
    +	ImageId *string `locationName:"imageId" type:"string"`
    +
    +	// The instance type.
    +	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
    +
    +	// The ID of the kernel.
    +	KernelId *string `locationName:"kernelId" type:"string"`
    +
    +	// The name of the key pair.
    +	KeyName *string `locationName:"keyName" type:"string"`
    +
    +	// Enable or disable monitoring for the instances.
    +	Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"`
    +
    +	// One or more network interfaces.
    +	NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
    +
    +	// The placement information.
    +	Placement *SpotPlacement `locationName:"placement" type:"structure"`
    +
    +	// The ID of the RAM disk.
    +	RamdiskId *string `locationName:"ramdiskId" type:"string"`
    +
    +	// One or more security groups. When requesting instances in a VPC, you must
    +	// specify the IDs of the security groups. When requesting instances in EC2-Classic,
    +	// you can specify the names or the IDs of the security groups.
    +	SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
    +
    +	// The bid price per unit hour for the specified instance type. If this value
    +	// is not specified, the default is the Spot bid price specified for the fleet.
    +	// To determine the bid price per unit hour, divide the Spot bid price by the
    +	// value of WeightedCapacity.
    +	SpotPrice *string `locationName:"spotPrice" type:"string"`
    +
    +	// The ID of the subnet in which to launch the instances. To specify multiple
    +	// subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08".
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +
    +	// The user data to make available to the instances. If you are using an AWS
    +	// SDK or command line tool, Base64-encoding is performed for you, and you can
    +	// load the text from a file. Otherwise, you must provide Base64-encoded text.
    +	UserData *string `locationName:"userData" type:"string"`
    +
    +	// The number of units provided by the specified instance type. These are the
    +	// same units that you chose to set the target capacity in terms (instances
    +	// or a performance characteristic such as vCPUs, memory, or I/O).
    +	//
    +	// If the target capacity divided by this value is not a whole number, we round
    +	// the number of instances to the next whole number. If this value is not specified,
    +	// the default is 1.
    +	WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"`
    +}
    +
    +// String returns the string representation
    +func (s SpotFleetLaunchSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotFleetLaunchSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *SpotFleetLaunchSpecification) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "SpotFleetLaunchSpecification"}
    +	if s.NetworkInterfaces != nil {
    +		for i, v := range s.NetworkInterfaces {
    +			if v == nil {
    +				continue
    +			}
    +			if err := v.Validate(); err != nil {
    +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
    +			}
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAddressingType sets the AddressingType field's value.
    +func (s *SpotFleetLaunchSpecification) SetAddressingType(v string) *SpotFleetLaunchSpecification {
    +	s.AddressingType = &v
    +	return s
    +}
    +
    +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
    +func (s *SpotFleetLaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *SpotFleetLaunchSpecification {
    +	s.BlockDeviceMappings = v
    +	return s
    +}
    +
    +// SetEbsOptimized sets the EbsOptimized field's value.
    +func (s *SpotFleetLaunchSpecification) SetEbsOptimized(v bool) *SpotFleetLaunchSpecification {
    +	s.EbsOptimized = &v
    +	return s
    +}
    +
    +// SetIamInstanceProfile sets the IamInstanceProfile field's value.
    +func (s *SpotFleetLaunchSpecification) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *SpotFleetLaunchSpecification {
    +	s.IamInstanceProfile = v
    +	return s
    +}
    +
    +// SetImageId sets the ImageId field's value.
    +func (s *SpotFleetLaunchSpecification) SetImageId(v string) *SpotFleetLaunchSpecification {
    +	s.ImageId = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *SpotFleetLaunchSpecification) SetInstanceType(v string) *SpotFleetLaunchSpecification {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetKernelId sets the KernelId field's value.
    +func (s *SpotFleetLaunchSpecification) SetKernelId(v string) *SpotFleetLaunchSpecification {
    +	s.KernelId = &v
    +	return s
    +}
    +
    +// SetKeyName sets the KeyName field's value.
    +func (s *SpotFleetLaunchSpecification) SetKeyName(v string) *SpotFleetLaunchSpecification {
    +	s.KeyName = &v
    +	return s
    +}
    +
    +// SetMonitoring sets the Monitoring field's value.
    +func (s *SpotFleetLaunchSpecification) SetMonitoring(v *SpotFleetMonitoring) *SpotFleetLaunchSpecification {
    +	s.Monitoring = v
    +	return s
    +}
    +
    +// SetNetworkInterfaces sets the NetworkInterfaces field's value.
    +func (s *SpotFleetLaunchSpecification) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *SpotFleetLaunchSpecification {
    +	s.NetworkInterfaces = v
    +	return s
    +}
    +
    +// SetPlacement sets the Placement field's value.
    +func (s *SpotFleetLaunchSpecification) SetPlacement(v *SpotPlacement) *SpotFleetLaunchSpecification {
    +	s.Placement = v
    +	return s
    +}
    +
    +// SetRamdiskId sets the RamdiskId field's value.
    +func (s *SpotFleetLaunchSpecification) SetRamdiskId(v string) *SpotFleetLaunchSpecification {
    +	s.RamdiskId = &v
    +	return s
    +}
    +
    +// SetSecurityGroups sets the SecurityGroups field's value.
    +func (s *SpotFleetLaunchSpecification) SetSecurityGroups(v []*GroupIdentifier) *SpotFleetLaunchSpecification {
    +	s.SecurityGroups = v
    +	return s
    +}
    +
    +// SetSpotPrice sets the SpotPrice field's value.
    +func (s *SpotFleetLaunchSpecification) SetSpotPrice(v string) *SpotFleetLaunchSpecification {
    +	s.SpotPrice = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *SpotFleetLaunchSpecification) SetSubnetId(v string) *SpotFleetLaunchSpecification {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetUserData sets the UserData field's value.
    +func (s *SpotFleetLaunchSpecification) SetUserData(v string) *SpotFleetLaunchSpecification {
    +	s.UserData = &v
    +	return s
    +}
    +
    +// SetWeightedCapacity sets the WeightedCapacity field's value.
    +func (s *SpotFleetLaunchSpecification) SetWeightedCapacity(v float64) *SpotFleetLaunchSpecification {
    +	s.WeightedCapacity = &v
    +	return s
    +}
    +
    +// Describes whether monitoring is enabled.
    +type SpotFleetMonitoring struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Enables monitoring for the instance.
    +	//
    +	// Default: false
    +	Enabled *bool `locationName:"enabled" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s SpotFleetMonitoring) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotFleetMonitoring) GoString() string {
    +	return s.String()
    +}
    +
    +// SetEnabled sets the Enabled field's value.
    +func (s *SpotFleetMonitoring) SetEnabled(v bool) *SpotFleetMonitoring {
    +	s.Enabled = &v
    +	return s
    +}
    +
    +// Describes a Spot fleet request.
    +type SpotFleetRequestConfig struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The progress of the Spot fleet request. If there is an error, the status
    +	// is error. After all bids are placed, the status is pending_fulfillment. If
    +	// the size of the fleet is equal to or greater than its target capacity, the
    +	// status is fulfilled. If the size of the fleet is decreased, the status is
    +	// pending_termination while Spot instances are terminating.
    +	ActivityStatus *string `locationName:"activityStatus" type:"string" enum:"ActivityStatus"`
    +
    +	// The creation date and time of the request.
    +	//
    +	// CreateTime is a required field
    +	CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601" required:"true"`
    +
    +	// Information about the configuration of the Spot fleet request.
    +	//
    +	// SpotFleetRequestConfig is a required field
    +	SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"`
    +
    +	// The ID of the Spot fleet request.
    +	//
    +	// SpotFleetRequestId is a required field
    +	SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"`
    +
    +	// The state of the Spot fleet request.
    +	//
    +	// SpotFleetRequestState is a required field
    +	SpotFleetRequestState *string `locationName:"spotFleetRequestState" type:"string" required:"true" enum:"BatchState"`
    +}
    +
    +// String returns the string representation
    +func (s SpotFleetRequestConfig) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotFleetRequestConfig) GoString() string {
    +	return s.String()
    +}
    +
    +// SetActivityStatus sets the ActivityStatus field's value.
    +func (s *SpotFleetRequestConfig) SetActivityStatus(v string) *SpotFleetRequestConfig {
    +	s.ActivityStatus = &v
    +	return s
    +}
    +
    +// SetCreateTime sets the CreateTime field's value.
    +func (s *SpotFleetRequestConfig) SetCreateTime(v time.Time) *SpotFleetRequestConfig {
    +	s.CreateTime = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestConfig sets the SpotFleetRequestConfig field's value.
    +func (s *SpotFleetRequestConfig) SetSpotFleetRequestConfig(v *SpotFleetRequestConfigData) *SpotFleetRequestConfig {
    +	s.SpotFleetRequestConfig = v
    +	return s
    +}
    +
    +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value.
    +func (s *SpotFleetRequestConfig) SetSpotFleetRequestId(v string) *SpotFleetRequestConfig {
    +	s.SpotFleetRequestId = &v
    +	return s
    +}
    +
    +// SetSpotFleetRequestState sets the SpotFleetRequestState field's value.
    +func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRequestConfig {
    +	s.SpotFleetRequestState = &v
    +	return s
    +}
    +
    +// Describes the configuration of a Spot fleet request.
    +type SpotFleetRequestConfigData struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates how to allocate the target capacity across the Spot pools specified
    +	// by the Spot fleet request. The default is lowestPrice.
    +	AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"`
    +
    +	// A unique, case-sensitive identifier you provide to ensure idempotency of
    +	// your listings. This helps avoid duplicate listings. For more information,
    +	// see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    +	ClientToken *string `locationName:"clientToken" type:"string"`
    +
    +	// Indicates whether running Spot instances should be terminated if the target
    +	// capacity of the Spot fleet request is decreased below the current size of
    +	// the Spot fleet.
    +	ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"`
    +
    +	// The number of units fulfilled by this request compared to the set target
    +	// capacity.
    +	FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"`
    +
    +	// Grants the Spot fleet permission to terminate Spot instances on your behalf
    +	// when you cancel its Spot fleet request using CancelSpotFleetRequests or when
    +	// the Spot fleet request expires, if you set terminateInstancesWithExpiration.
    +	//
    +	// IamFleetRole is a required field
    +	IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"`
    +
    +	// Information about the launch specifications for the Spot fleet request.
    +	//
    +	// LaunchSpecifications is a required field
    +	LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" min:"1" type:"list" required:"true"`
    +
    +	// The bid price per unit hour.
    +	//
    +	// SpotPrice is a required field
    +	SpotPrice *string `locationName:"spotPrice" type:"string" required:"true"`
    +
    +	// The number of units to request. You can choose to set the target capacity
    +	// in terms of instances or a performance characteristic that is important to
    +	// your application workload, such as vCPUs, memory, or I/O.
    +	//
    +	// TargetCapacity is a required field
    +	TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"`
    +
    +	// Indicates whether running Spot instances should be terminated when the Spot
    +	// fleet request expires.
    +	TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"`
    +
    +	// The type of request. Indicates whether the fleet will only request the target
    +	// capacity or also attempt to maintain it. When you request a certain target
    +	// capacity, the fleet will only place the required bids. It will not attempt
    +	// to replenish Spot instances if capacity is diminished, nor will it submit
    +	// bids in alternative Spot pools if capacity is not available. When you want
    +	// to maintain a certain target capacity, fleet will place the required bids
    +	// to meet this target capacity. It will also automatically replenish any interrupted
    +	// instances. Default: maintain.
    +	Type *string `locationName:"type" type:"string" enum:"FleetType"`
    +
    +	// The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	// The default is to start fulfilling the request immediately.
    +	ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	// At this point, no new Spot instance requests are placed or enabled to fulfill
    +	// the request.
    +	ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s SpotFleetRequestConfigData) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotFleetRequestConfigData) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *SpotFleetRequestConfigData) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "SpotFleetRequestConfigData"}
    +	if s.IamFleetRole == nil {
    +		invalidParams.Add(request.NewErrParamRequired("IamFleetRole"))
    +	}
    +	if s.LaunchSpecifications == nil {
    +		invalidParams.Add(request.NewErrParamRequired("LaunchSpecifications"))
    +	}
    +	if s.LaunchSpecifications != nil && len(s.LaunchSpecifications) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("LaunchSpecifications", 1))
    +	}
    +	if s.SpotPrice == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SpotPrice"))
    +	}
    +	if s.TargetCapacity == nil {
    +		invalidParams.Add(request.NewErrParamRequired("TargetCapacity"))
    +	}
    +	if s.LaunchSpecifications != nil {
    +		for i, v := range s.LaunchSpecifications {
    +			if v == nil {
    +				continue
    +			}
    +			if err := v.Validate(); err != nil {
    +				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchSpecifications", i), err.(request.ErrInvalidParams))
    +			}
    +		}
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAllocationStrategy sets the AllocationStrategy field's value.
    +func (s *SpotFleetRequestConfigData) SetAllocationStrategy(v string) *SpotFleetRequestConfigData {
    +	s.AllocationStrategy = &v
    +	return s
    +}
    +
    +// SetClientToken sets the ClientToken field's value.
    +func (s *SpotFleetRequestConfigData) SetClientToken(v string) *SpotFleetRequestConfigData {
    +	s.ClientToken = &v
    +	return s
    +}
    +
    +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value.
    +func (s *SpotFleetRequestConfigData) SetExcessCapacityTerminationPolicy(v string) *SpotFleetRequestConfigData {
    +	s.ExcessCapacityTerminationPolicy = &v
    +	return s
    +}
    +
    +// SetFulfilledCapacity sets the FulfilledCapacity field's value.
    +func (s *SpotFleetRequestConfigData) SetFulfilledCapacity(v float64) *SpotFleetRequestConfigData {
    +	s.FulfilledCapacity = &v
    +	return s
    +}
    +
    +// SetIamFleetRole sets the IamFleetRole field's value.
    +func (s *SpotFleetRequestConfigData) SetIamFleetRole(v string) *SpotFleetRequestConfigData {
    +	s.IamFleetRole = &v
    +	return s
    +}
    +
    +// SetLaunchSpecifications sets the LaunchSpecifications field's value.
    +func (s *SpotFleetRequestConfigData) SetLaunchSpecifications(v []*SpotFleetLaunchSpecification) *SpotFleetRequestConfigData {
    +	s.LaunchSpecifications = v
    +	return s
    +}
    +
    +// SetSpotPrice sets the SpotPrice field's value.
    +func (s *SpotFleetRequestConfigData) SetSpotPrice(v string) *SpotFleetRequestConfigData {
    +	s.SpotPrice = &v
    +	return s
    +}
    +
    +// SetTargetCapacity sets the TargetCapacity field's value.
    +func (s *SpotFleetRequestConfigData) SetTargetCapacity(v int64) *SpotFleetRequestConfigData {
    +	s.TargetCapacity = &v
    +	return s
    +}
    +
    +// SetTerminateInstancesWithExpiration sets the TerminateInstancesWithExpiration field's value.
    +func (s *SpotFleetRequestConfigData) SetTerminateInstancesWithExpiration(v bool) *SpotFleetRequestConfigData {
    +	s.TerminateInstancesWithExpiration = &v
    +	return s
    +}
    +
    +// SetType sets the Type field's value.
    +func (s *SpotFleetRequestConfigData) SetType(v string) *SpotFleetRequestConfigData {
    +	s.Type = &v
    +	return s
    +}
    +
    +// SetValidFrom sets the ValidFrom field's value.
    +func (s *SpotFleetRequestConfigData) SetValidFrom(v time.Time) *SpotFleetRequestConfigData {
    +	s.ValidFrom = &v
    +	return s
    +}
    +
    +// SetValidUntil sets the ValidUntil field's value.
    +func (s *SpotFleetRequestConfigData) SetValidUntil(v time.Time) *SpotFleetRequestConfigData {
    +	s.ValidUntil = &v
    +	return s
    +}
    +
    +// Describes a Spot instance request.
    +type SpotInstanceRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// If you specified a duration and your Spot instance request was fulfilled,
    +	// this is the fixed hourly price in effect for the Spot instance while it runs.
    +	ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"`
    +
    +	// The Availability Zone group. If you specify the same Availability Zone group
    +	// for all Spot instance requests, all Spot instances are launched in the same
    +	// Availability Zone.
    +	AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"`
    +
    +	// The duration for the Spot instance, in minutes.
    +	BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"`
    +
    +	// The date and time when the Spot instance request was created, in UTC format
    +	// (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The fault codes for the Spot instance request, if any.
    +	Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"`
    +
    +	// The instance ID, if an instance has been launched to fulfill the Spot instance
    +	// request.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The instance launch group. Launch groups are Spot instances that launch together
    +	// and terminate together.
    +	LaunchGroup *string `locationName:"launchGroup" type:"string"`
    +
    +	// Additional information for launching instances.
    +	LaunchSpecification *LaunchSpecification `locationName:"launchSpecification" type:"structure"`
    +
    +	// The Availability Zone in which the bid is launched.
    +	LaunchedAvailabilityZone *string `locationName:"launchedAvailabilityZone" type:"string"`
    +
    +	// The product description associated with the Spot instance.
    +	ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"`
    +
    +	// The ID of the Spot instance request.
    +	SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"`
    +
    +	// The maximum hourly price (bid) for the Spot instance launched to fulfill
    +	// the request.
    +	SpotPrice *string `locationName:"spotPrice" type:"string"`
    +
    +	// The state of the Spot instance request. Spot bid status information can help
    +	// you track your Spot instance requests. For more information, see Spot Bid
    +	// Status (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	State *string `locationName:"state" type:"string" enum:"SpotInstanceState"`
    +
    +	// The status code and status message describing the Spot instance request.
    +	Status *SpotInstanceStatus `locationName:"status" type:"structure"`
    +
    +	// Any tags assigned to the resource.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The Spot instance request type.
    +	Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"`
    +
    +	// The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	// The request becomes active at this date and time.
    +	ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	// If this is a one-time request, it remains active until all instances launch,
    +	// the request is canceled, or this date is reached. If the request is persistent,
    +	// it remains active until it is canceled or this date is reached.
    +	ValidUntil *time.Time `locationName:"validUntil" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s SpotInstanceRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotInstanceRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// SetActualBlockHourlyPrice sets the ActualBlockHourlyPrice field's value.
    +func (s *SpotInstanceRequest) SetActualBlockHourlyPrice(v string) *SpotInstanceRequest {
    +	s.ActualBlockHourlyPrice = &v
    +	return s
    +}
    +
    +// SetAvailabilityZoneGroup sets the AvailabilityZoneGroup field's value.
    +func (s *SpotInstanceRequest) SetAvailabilityZoneGroup(v string) *SpotInstanceRequest {
    +	s.AvailabilityZoneGroup = &v
    +	return s
    +}
    +
    +// SetBlockDurationMinutes sets the BlockDurationMinutes field's value.
    +func (s *SpotInstanceRequest) SetBlockDurationMinutes(v int64) *SpotInstanceRequest {
    +	s.BlockDurationMinutes = &v
    +	return s
    +}
    +
    +// SetCreateTime sets the CreateTime field's value.
    +func (s *SpotInstanceRequest) SetCreateTime(v time.Time) *SpotInstanceRequest {
    +	s.CreateTime = &v
    +	return s
    +}
    +
    +// SetFault sets the Fault field's value.
    +func (s *SpotInstanceRequest) SetFault(v *SpotInstanceStateFault) *SpotInstanceRequest {
    +	s.Fault = v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *SpotInstanceRequest) SetInstanceId(v string) *SpotInstanceRequest {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetLaunchGroup sets the LaunchGroup field's value.
    +func (s *SpotInstanceRequest) SetLaunchGroup(v string) *SpotInstanceRequest {
    +	s.LaunchGroup = &v
    +	return s
    +}
    +
    +// SetLaunchSpecification sets the LaunchSpecification field's value.
    +func (s *SpotInstanceRequest) SetLaunchSpecification(v *LaunchSpecification) *SpotInstanceRequest {
    +	s.LaunchSpecification = v
    +	return s
    +}
    +
    +// SetLaunchedAvailabilityZone sets the LaunchedAvailabilityZone field's value.
    +func (s *SpotInstanceRequest) SetLaunchedAvailabilityZone(v string) *SpotInstanceRequest {
    +	s.LaunchedAvailabilityZone = &v
    +	return s
    +}
    +
    +// SetProductDescription sets the ProductDescription field's value.
    +func (s *SpotInstanceRequest) SetProductDescription(v string) *SpotInstanceRequest {
    +	s.ProductDescription = &v
    +	return s
    +}
    +
    +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value.
    +func (s *SpotInstanceRequest) SetSpotInstanceRequestId(v string) *SpotInstanceRequest {
    +	s.SpotInstanceRequestId = &v
    +	return s
    +}
    +
    +// SetSpotPrice sets the SpotPrice field's value.
    +func (s *SpotInstanceRequest) SetSpotPrice(v string) *SpotInstanceRequest {
    +	s.SpotPrice = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *SpotInstanceRequest) SetState(v string) *SpotInstanceRequest {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *SpotInstanceRequest) SetStatus(v *SpotInstanceStatus) *SpotInstanceRequest {
    +	s.Status = v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *SpotInstanceRequest) SetTags(v []*Tag) *SpotInstanceRequest {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetType sets the Type field's value.
    +func (s *SpotInstanceRequest) SetType(v string) *SpotInstanceRequest {
    +	s.Type = &v
    +	return s
    +}
    +
    +// SetValidFrom sets the ValidFrom field's value.
    +func (s *SpotInstanceRequest) SetValidFrom(v time.Time) *SpotInstanceRequest {
    +	s.ValidFrom = &v
    +	return s
    +}
    +
    +// SetValidUntil sets the ValidUntil field's value.
    +func (s *SpotInstanceRequest) SetValidUntil(v time.Time) *SpotInstanceRequest {
    +	s.ValidUntil = &v
    +	return s
    +}
    +
    +// Describes a Spot instance state change.
    +type SpotInstanceStateFault struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The reason code for the Spot instance state change.
    +	Code *string `locationName:"code" type:"string"`
    +
    +	// The message for the Spot instance state change.
    +	Message *string `locationName:"message" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s SpotInstanceStateFault) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotInstanceStateFault) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *SpotInstanceStateFault) SetCode(v string) *SpotInstanceStateFault {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetMessage sets the Message field's value.
    +func (s *SpotInstanceStateFault) SetMessage(v string) *SpotInstanceStateFault {
    +	s.Message = &v
    +	return s
    +}
    +
    +// Describes the status of a Spot instance request.
    +type SpotInstanceStatus struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The status code. For a list of status codes, see Spot Bid Status Codes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	Code *string `locationName:"code" type:"string"`
    +
    +	// The description for the status code.
    +	Message *string `locationName:"message" type:"string"`
    +
    +	// The date and time of the most recent status update, in UTC format (for example,
    +	// YYYY-MM-DDTHH:MM:SSZ).
    +	UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s SpotInstanceStatus) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotInstanceStatus) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *SpotInstanceStatus) SetCode(v string) *SpotInstanceStatus {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetMessage sets the Message field's value.
    +func (s *SpotInstanceStatus) SetMessage(v string) *SpotInstanceStatus {
    +	s.Message = &v
    +	return s
    +}
    +
    +// SetUpdateTime sets the UpdateTime field's value.
    +func (s *SpotInstanceStatus) SetUpdateTime(v time.Time) *SpotInstanceStatus {
    +	s.UpdateTime = &v
    +	return s
    +}
    +
    +// Describes Spot instance placement.
    +type SpotPlacement struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone.
    +	//
    +	// [Spot fleet only] To specify multiple Availability Zones, separate them using
    +	// commas; for example, "us-west-2a, us-west-2b".
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The name of the placement group (for cluster instances).
    +	GroupName *string `locationName:"groupName" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s SpotPlacement) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotPlacement) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *SpotPlacement) SetAvailabilityZone(v string) *SpotPlacement {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *SpotPlacement) SetGroupName(v string) *SpotPlacement {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// Describes the maximum hourly price (bid) for any Spot instance launched to
    +// fulfill the request.
    +type SpotPrice struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The instance type.
    +	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
    +
    +	// A general description of the AMI.
    +	ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"`
    +
    +	// The maximum price (bid) that you are willing to pay for a Spot instance.
    +	SpotPrice *string `locationName:"spotPrice" type:"string"`
    +
    +	// The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    +	Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s SpotPrice) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s SpotPrice) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *SpotPrice) SetAvailabilityZone(v string) *SpotPrice {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetInstanceType sets the InstanceType field's value.
    +func (s *SpotPrice) SetInstanceType(v string) *SpotPrice {
    +	s.InstanceType = &v
    +	return s
    +}
    +
    +// SetProductDescription sets the ProductDescription field's value.
    +func (s *SpotPrice) SetProductDescription(v string) *SpotPrice {
    +	s.ProductDescription = &v
    +	return s
    +}
    +
    +// SetSpotPrice sets the SpotPrice field's value.
    +func (s *SpotPrice) SetSpotPrice(v string) *SpotPrice {
    +	s.SpotPrice = &v
    +	return s
    +}
    +
    +// SetTimestamp sets the Timestamp field's value.
    +func (s *SpotPrice) SetTimestamp(v time.Time) *SpotPrice {
    +	s.Timestamp = &v
    +	return s
    +}
    +
    +// Describes a stale rule in a security group.
    +type StaleIpPermission struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The start of the port range for the TCP and UDP protocols, or an ICMP type
    +	// number. A value of -1 indicates all ICMP types.
    +	FromPort *int64 `locationName:"fromPort" type:"integer"`
    +
    +	// The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers)
    +	// (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
    +	IpProtocol *string `locationName:"ipProtocol" type:"string"`
    +
    +	// One or more IP ranges. Not applicable for stale security group rules.
    +	IpRanges []*string `locationName:"ipRanges" locationNameList:"item" type:"list"`
    +
    +	// One or more prefix list IDs for an AWS service. Not applicable for stale
    +	// security group rules.
    +	PrefixListIds []*string `locationName:"prefixListIds" locationNameList:"item" type:"list"`
    +
    +	// The end of the port range for the TCP and UDP protocols, or an ICMP type
    +	// number. A value of -1 indicates all ICMP types.
    +	ToPort *int64 `locationName:"toPort" type:"integer"`
    +
    +	// One or more security group pairs. Returns the ID of the referenced security
    +	// group and VPC, and the ID and status of the VPC peering connection.
    +	UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s StaleIpPermission) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s StaleIpPermission) GoString() string {
    +	return s.String()
    +}
    +
    +// SetFromPort sets the FromPort field's value.
    +func (s *StaleIpPermission) SetFromPort(v int64) *StaleIpPermission {
    +	s.FromPort = &v
    +	return s
    +}
    +
    +// SetIpProtocol sets the IpProtocol field's value.
    +func (s *StaleIpPermission) SetIpProtocol(v string) *StaleIpPermission {
    +	s.IpProtocol = &v
    +	return s
    +}
    +
    +// SetIpRanges sets the IpRanges field's value.
    +func (s *StaleIpPermission) SetIpRanges(v []*string) *StaleIpPermission {
    +	s.IpRanges = v
    +	return s
    +}
    +
    +// SetPrefixListIds sets the PrefixListIds field's value.
    +func (s *StaleIpPermission) SetPrefixListIds(v []*string) *StaleIpPermission {
    +	s.PrefixListIds = v
    +	return s
    +}
    +
    +// SetToPort sets the ToPort field's value.
    +func (s *StaleIpPermission) SetToPort(v int64) *StaleIpPermission {
    +	s.ToPort = &v
    +	return s
    +}
    +
    +// SetUserIdGroupPairs sets the UserIdGroupPairs field's value.
    +func (s *StaleIpPermission) SetUserIdGroupPairs(v []*UserIdGroupPair) *StaleIpPermission {
    +	s.UserIdGroupPairs = v
    +	return s
    +}
    +
    +// Describes a stale security group (a security group that contains stale rules).
    +type StaleSecurityGroup struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The description of the security group.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The ID of the security group.
    +	//
    +	// GroupId is a required field
    +	GroupId *string `locationName:"groupId" type:"string" required:"true"`
    +
    +	// The name of the security group.
    +	GroupName *string `locationName:"groupName" type:"string"`
    +
    +	// Information about the stale inbound rules in the security group.
    +	StaleIpPermissions []*StaleIpPermission `locationName:"staleIpPermissions" locationNameList:"item" type:"list"`
    +
    +	// Information about the stale outbound rules in the security group.
    +	StaleIpPermissionsEgress []*StaleIpPermission `locationName:"staleIpPermissionsEgress" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC for the security group.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s StaleSecurityGroup) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s StaleSecurityGroup) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *StaleSecurityGroup) SetDescription(v string) *StaleSecurityGroup {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *StaleSecurityGroup) SetGroupId(v string) *StaleSecurityGroup {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *StaleSecurityGroup) SetGroupName(v string) *StaleSecurityGroup {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// SetStaleIpPermissions sets the StaleIpPermissions field's value.
    +func (s *StaleSecurityGroup) SetStaleIpPermissions(v []*StaleIpPermission) *StaleSecurityGroup {
    +	s.StaleIpPermissions = v
    +	return s
    +}
    +
    +// SetStaleIpPermissionsEgress sets the StaleIpPermissionsEgress field's value.
    +func (s *StaleSecurityGroup) SetStaleIpPermissionsEgress(v []*StaleIpPermission) *StaleSecurityGroup {
    +	s.StaleIpPermissionsEgress = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *StaleSecurityGroup) SetVpcId(v string) *StaleSecurityGroup {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Contains the parameters for StartInstances.
    +type StartInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Reserved.
    +	AdditionalInfo *string `locationName:"additionalInfo" type:"string"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more instance IDs.
    +	//
    +	// InstanceIds is a required field
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s StartInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s StartInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *StartInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "StartInstancesInput"}
    +	if s.InstanceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetAdditionalInfo sets the AdditionalInfo field's value.
    +func (s *StartInstancesInput) SetAdditionalInfo(v string) *StartInstancesInput {
    +	s.AdditionalInfo = &v
    +	return s
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *StartInstancesInput) SetDryRun(v bool) *StartInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *StartInstancesInput) SetInstanceIds(v []*string) *StartInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// Contains the output of StartInstances.
    +type StartInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more started instances.
    +	StartingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s StartInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s StartInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetStartingInstances sets the StartingInstances field's value.
    +func (s *StartInstancesOutput) SetStartingInstances(v []*InstanceStateChange) *StartInstancesOutput {
    +	s.StartingInstances = v
    +	return s
    +}
    +
    +// Describes a state change.
    +type StateReason struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The reason code for the state change.
    +	Code *string `locationName:"code" type:"string"`
    +
    +	// The message for the state change.
    +	//
    +	//    * Server.SpotInstanceTermination: A Spot instance was terminated due to
    +	//    an increase in the market price.
    +	//
    +	//    * Server.InternalError: An internal error occurred during instance launch,
    +	//    resulting in termination.
    +	//
    +	//    * Server.InsufficientInstanceCapacity: There was insufficient instance
    +	//    capacity to satisfy the launch request.
    +	//
    +	//    * Client.InternalError: A client error caused the instance to terminate
    +	//    on launch.
    +	//
    +	//    * Client.InstanceInitiatedShutdown: The instance was shut down using the
    +	//    shutdown -h command from the instance.
    +	//
    +	//    * Client.UserInitiatedShutdown: The instance was shut down using the Amazon
    +	//    EC2 API.
    +	//
    +	//    * Client.VolumeLimitExceeded: The limit on the number of EBS volumes or
    +	//    total storage was exceeded. Decrease usage or request an increase in your
    +	//    limits.
    +	//
    +	//    * Client.InvalidSnapshot.NotFound: The specified snapshot was not found.
    +	Message *string `locationName:"message" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s StateReason) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s StateReason) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *StateReason) SetCode(v string) *StateReason {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetMessage sets the Message field's value.
    +func (s *StateReason) SetMessage(v string) *StateReason {
    +	s.Message = &v
    +	return s
    +}
    +
    +// Contains the parameters for StopInstances.
    +type StopInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// Forces the instances to stop. The instances do not have an opportunity to
    +	// flush file system caches or file system metadata. If you use this option,
    +	// you must perform file system check and repair procedures. This option is
    +	// not recommended for Windows instances.
    +	//
    +	// Default: false
    +	Force *bool `locationName:"force" type:"boolean"`
    +
    +	// One or more instance IDs.
    +	//
    +	// InstanceIds is a required field
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s StopInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s StopInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *StopInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "StopInstancesInput"}
    +	if s.InstanceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *StopInstancesInput) SetDryRun(v bool) *StopInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetForce sets the Force field's value.
    +func (s *StopInstancesInput) SetForce(v bool) *StopInstancesInput {
    +	s.Force = &v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *StopInstancesInput) SetInstanceIds(v []*string) *StopInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// Contains the output of StopInstances.
    +type StopInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more stopped instances.
    +	StoppingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s StopInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s StopInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetStoppingInstances sets the StoppingInstances field's value.
    +func (s *StopInstancesOutput) SetStoppingInstances(v []*InstanceStateChange) *StopInstancesOutput {
    +	s.StoppingInstances = v
    +	return s
    +}
    +
    +// Describes the storage location for an instance store-backed AMI.
    +type Storage struct {
    +	_ struct{} `type:"structure"`
    +
    +	// An Amazon S3 storage location.
    +	S3 *S3Storage `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s Storage) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Storage) GoString() string {
    +	return s.String()
    +}
    +
    +// SetS3 sets the S3 field's value.
    +func (s *Storage) SetS3(v *S3Storage) *Storage {
    +	s.S3 = v
    +	return s
    +}
    +
    +// Describes a subnet.
    +type Subnet struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone of the subnet.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The number of unused IP addresses in the subnet. Note that the IP addresses
    +	// for any stopped instances are considered unavailable.
    +	AvailableIpAddressCount *int64 `locationName:"availableIpAddressCount" type:"integer"`
    +
    +	// The CIDR block assigned to the subnet.
    +	CidrBlock *string `locationName:"cidrBlock" type:"string"`
    +
    +	// Indicates whether this is the default subnet for the Availability Zone.
    +	DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"`
    +
    +	// Indicates whether instances launched in this subnet receive a public IP address.
    +	MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"`
    +
    +	// The current state of the subnet.
    +	State *string `locationName:"state" type:"string" enum:"SubnetState"`
    +
    +	// The ID of the subnet.
    +	SubnetId *string `locationName:"subnetId" type:"string"`
    +
    +	// Any tags assigned to the subnet.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC the subnet is in.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Subnet) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Subnet) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *Subnet) SetAvailabilityZone(v string) *Subnet {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetAvailableIpAddressCount sets the AvailableIpAddressCount field's value.
    +func (s *Subnet) SetAvailableIpAddressCount(v int64) *Subnet {
    +	s.AvailableIpAddressCount = &v
    +	return s
    +}
    +
    +// SetCidrBlock sets the CidrBlock field's value.
    +func (s *Subnet) SetCidrBlock(v string) *Subnet {
    +	s.CidrBlock = &v
    +	return s
    +}
    +
    +// SetDefaultForAz sets the DefaultForAz field's value.
    +func (s *Subnet) SetDefaultForAz(v bool) *Subnet {
    +	s.DefaultForAz = &v
    +	return s
    +}
    +
    +// SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value.
    +func (s *Subnet) SetMapPublicIpOnLaunch(v bool) *Subnet {
    +	s.MapPublicIpOnLaunch = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Subnet) SetState(v string) *Subnet {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetSubnetId sets the SubnetId field's value.
    +func (s *Subnet) SetSubnetId(v string) *Subnet {
    +	s.SubnetId = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *Subnet) SetTags(v []*Tag) *Subnet {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *Subnet) SetVpcId(v string) *Subnet {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a tag.
    +type Tag struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The key of the tag.
    +	//
    +	// Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode
    +	// characters. May not begin with aws:
    +	Key *string `locationName:"key" type:"string"`
    +
    +	// The value of the tag.
    +	//
    +	// Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode
    +	// characters.
    +	Value *string `locationName:"value" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Tag) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Tag) GoString() string {
    +	return s.String()
    +}
    +
    +// SetKey sets the Key field's value.
    +func (s *Tag) SetKey(v string) *Tag {
    +	s.Key = &v
    +	return s
    +}
    +
    +// SetValue sets the Value field's value.
    +func (s *Tag) SetValue(v string) *Tag {
    +	s.Value = &v
    +	return s
    +}
    +
    +// Describes a tag.
    +type TagDescription struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The tag key.
    +	Key *string `locationName:"key" type:"string"`
    +
    +	// The ID of the resource. For example, ami-1a2b3c4d.
    +	ResourceId *string `locationName:"resourceId" type:"string"`
    +
    +	// The resource type.
    +	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
    +
    +	// The tag value.
    +	Value *string `locationName:"value" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s TagDescription) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s TagDescription) GoString() string {
    +	return s.String()
    +}
    +
    +// SetKey sets the Key field's value.
    +func (s *TagDescription) SetKey(v string) *TagDescription {
    +	s.Key = &v
    +	return s
    +}
    +
    +// SetResourceId sets the ResourceId field's value.
    +func (s *TagDescription) SetResourceId(v string) *TagDescription {
    +	s.ResourceId = &v
    +	return s
    +}
    +
    +// SetResourceType sets the ResourceType field's value.
    +func (s *TagDescription) SetResourceType(v string) *TagDescription {
    +	s.ResourceType = &v
    +	return s
    +}
    +
    +// SetValue sets the Value field's value.
    +func (s *TagDescription) SetValue(v string) *TagDescription {
    +	s.Value = &v
    +	return s
    +}
    +
    +// Information about the Convertible Reserved Instance offering.
    +type TargetConfiguration struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of instances the Convertible Reserved Instance offering can be
    +	// applied to. This parameter is reserved and cannot be specified in a request
    +	InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
    +
    +	// The ID of the Convertible Reserved Instance offering.
    +	OfferingId *string `locationName:"offeringId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s TargetConfiguration) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s TargetConfiguration) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *TargetConfiguration) SetInstanceCount(v int64) *TargetConfiguration {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetOfferingId sets the OfferingId field's value.
    +func (s *TargetConfiguration) SetOfferingId(v string) *TargetConfiguration {
    +	s.OfferingId = &v
    +	return s
    +}
    +
    +// Details about the target configuration.
    +type TargetConfigurationRequest struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of instances the Covertible Reserved Instance offering can be
    +	// applied to. This parameter is reserved and cannot be specified in a request
    +	InstanceCount *int64 `type:"integer"`
    +
    +	// The Convertible Reserved Instance offering ID. If this isn't included in
    +	// the request, the response lists your current Convertible Reserved Instance/s
    +	// and their value/s.
    +	//
    +	// OfferingId is a required field
    +	OfferingId *string `type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s TargetConfigurationRequest) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s TargetConfigurationRequest) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *TargetConfigurationRequest) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "TargetConfigurationRequest"}
    +	if s.OfferingId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("OfferingId"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetInstanceCount sets the InstanceCount field's value.
    +func (s *TargetConfigurationRequest) SetInstanceCount(v int64) *TargetConfigurationRequest {
    +	s.InstanceCount = &v
    +	return s
    +}
    +
    +// SetOfferingId sets the OfferingId field's value.
    +func (s *TargetConfigurationRequest) SetOfferingId(v string) *TargetConfigurationRequest {
    +	s.OfferingId = &v
    +	return s
    +}
    +
    +// The total value of the new Convertible Reserved Instances.
    +type TargetReservationValue struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The total value of the Convertible Reserved Instances that make up the exchange.
    +	// This is the sum of the list value, remaining upfront price, and additional
    +	// upfront cost of the exchange.
    +	ReservationValue *ReservationValue `locationName:"reservationValue" type:"structure"`
    +
    +	// The configuration of the Convertible Reserved Instances that make up the
    +	// exchange.
    +	TargetConfiguration *TargetConfiguration `locationName:"targetConfiguration" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s TargetReservationValue) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s TargetReservationValue) GoString() string {
    +	return s.String()
    +}
    +
    +// SetReservationValue sets the ReservationValue field's value.
    +func (s *TargetReservationValue) SetReservationValue(v *ReservationValue) *TargetReservationValue {
    +	s.ReservationValue = v
    +	return s
    +}
    +
    +// SetTargetConfiguration sets the TargetConfiguration field's value.
    +func (s *TargetReservationValue) SetTargetConfiguration(v *TargetConfiguration) *TargetReservationValue {
    +	s.TargetConfiguration = v
    +	return s
    +}
    +
    +// Contains the parameters for TerminateInstances.
    +type TerminateInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more instance IDs.
    +	//
    +	// Constraints: Up to 1000 instance IDs. We recommend breaking up this request
    +	// into smaller batches.
    +	//
    +	// InstanceIds is a required field
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s TerminateInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s TerminateInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *TerminateInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "TerminateInstancesInput"}
    +	if s.InstanceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *TerminateInstancesInput) SetDryRun(v bool) *TerminateInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *TerminateInstancesInput) SetInstanceIds(v []*string) *TerminateInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// Contains the output of TerminateInstances.
    +type TerminateInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about one or more terminated instances.
    +	TerminatingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s TerminateInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s TerminateInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetTerminatingInstances sets the TerminatingInstances field's value.
    +func (s *TerminateInstancesOutput) SetTerminatingInstances(v []*InstanceStateChange) *TerminateInstancesOutput {
    +	s.TerminatingInstances = v
    +	return s
    +}
    +
    +// Contains the parameters for UnassignPrivateIpAddresses.
    +type UnassignPrivateIpAddressesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the network interface.
    +	//
    +	// NetworkInterfaceId is a required field
    +	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`
    +
    +	// The secondary private IP addresses to unassign from the network interface.
    +	// You can specify this option multiple times to unassign more than one IP address.
    +	//
    +	// PrivateIpAddresses is a required field
    +	PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s UnassignPrivateIpAddressesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UnassignPrivateIpAddressesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *UnassignPrivateIpAddressesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "UnassignPrivateIpAddressesInput"}
    +	if s.NetworkInterfaceId == nil {
    +		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
    +	}
    +	if s.PrivateIpAddresses == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PrivateIpAddresses"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
    +func (s *UnassignPrivateIpAddressesInput) SetNetworkInterfaceId(v string) *UnassignPrivateIpAddressesInput {
    +	s.NetworkInterfaceId = &v
    +	return s
    +}
    +
    +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
    +func (s *UnassignPrivateIpAddressesInput) SetPrivateIpAddresses(v []*string) *UnassignPrivateIpAddressesInput {
    +	s.PrivateIpAddresses = v
    +	return s
    +}
    +
    +type UnassignPrivateIpAddressesOutput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s UnassignPrivateIpAddressesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UnassignPrivateIpAddressesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the parameters for UnmonitorInstances.
    +type UnmonitorInstancesInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Checks whether you have the required permissions for the action, without
    +	// actually making the request, and provides an error response. If you have
    +	// the required permissions, the error response is DryRunOperation. Otherwise,
    +	// it is UnauthorizedOperation.
    +	DryRun *bool `locationName:"dryRun" type:"boolean"`
    +
    +	// One or more instance IDs.
    +	//
    +	// InstanceIds is a required field
    +	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s UnmonitorInstancesInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UnmonitorInstancesInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *UnmonitorInstancesInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "UnmonitorInstancesInput"}
    +	if s.InstanceIds == nil {
    +		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDryRun sets the DryRun field's value.
    +func (s *UnmonitorInstancesInput) SetDryRun(v bool) *UnmonitorInstancesInput {
    +	s.DryRun = &v
    +	return s
    +}
    +
    +// SetInstanceIds sets the InstanceIds field's value.
    +func (s *UnmonitorInstancesInput) SetInstanceIds(v []*string) *UnmonitorInstancesInput {
    +	s.InstanceIds = v
    +	return s
    +}
    +
    +// Contains the output of UnmonitorInstances.
    +type UnmonitorInstancesOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Monitoring information for one or more instances.
    +	InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"`
    +}
    +
    +// String returns the string representation
    +func (s UnmonitorInstancesOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UnmonitorInstancesOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetInstanceMonitorings sets the InstanceMonitorings field's value.
    +func (s *UnmonitorInstancesOutput) SetInstanceMonitorings(v []*InstanceMonitoring) *UnmonitorInstancesOutput {
    +	s.InstanceMonitorings = v
    +	return s
    +}
    +
    +// Information about items that were not successfully processed in a batch call.
    +type UnsuccessfulItem struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the error.
    +	//
    +	// Error is a required field
    +	Error *UnsuccessfulItemError `locationName:"error" type:"structure" required:"true"`
    +
    +	// The ID of the resource.
    +	ResourceId *string `locationName:"resourceId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s UnsuccessfulItem) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UnsuccessfulItem) GoString() string {
    +	return s.String()
    +}
    +
    +// SetError sets the Error field's value.
    +func (s *UnsuccessfulItem) SetError(v *UnsuccessfulItemError) *UnsuccessfulItem {
    +	s.Error = v
    +	return s
    +}
    +
    +// SetResourceId sets the ResourceId field's value.
    +func (s *UnsuccessfulItem) SetResourceId(v string) *UnsuccessfulItem {
    +	s.ResourceId = &v
    +	return s
    +}
    +
    +// Information about the error that occurred. For more information about errors,
    +// see Error Codes (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
    +type UnsuccessfulItemError struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The error code.
    +	//
    +	// Code is a required field
    +	Code *string `locationName:"code" type:"string" required:"true"`
    +
    +	// The error message accompanying the error code.
    +	//
    +	// Message is a required field
    +	Message *string `locationName:"message" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s UnsuccessfulItemError) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UnsuccessfulItemError) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *UnsuccessfulItemError) SetCode(v string) *UnsuccessfulItemError {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetMessage sets the Message field's value.
    +func (s *UnsuccessfulItemError) SetMessage(v string) *UnsuccessfulItemError {
    +	s.Message = &v
    +	return s
    +}
    +
    +// Describes the S3 bucket for the disk image.
    +type UserBucket struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of the S3 bucket where the disk image is located.
    +	S3Bucket *string `type:"string"`
    +
    +	// The file name of the disk image.
    +	S3Key *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s UserBucket) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UserBucket) GoString() string {
    +	return s.String()
    +}
    +
    +// SetS3Bucket sets the S3Bucket field's value.
    +func (s *UserBucket) SetS3Bucket(v string) *UserBucket {
    +	s.S3Bucket = &v
    +	return s
    +}
    +
    +// SetS3Key sets the S3Key field's value.
    +func (s *UserBucket) SetS3Key(v string) *UserBucket {
    +	s.S3Key = &v
    +	return s
    +}
    +
    +// Describes the S3 bucket for the disk image.
    +type UserBucketDetails struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The S3 bucket from which the disk image was created.
    +	S3Bucket *string `locationName:"s3Bucket" type:"string"`
    +
    +	// The file name of the disk image.
    +	S3Key *string `locationName:"s3Key" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s UserBucketDetails) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UserBucketDetails) GoString() string {
    +	return s.String()
    +}
    +
    +// SetS3Bucket sets the S3Bucket field's value.
    +func (s *UserBucketDetails) SetS3Bucket(v string) *UserBucketDetails {
    +	s.S3Bucket = &v
    +	return s
    +}
    +
    +// SetS3Key sets the S3Key field's value.
    +func (s *UserBucketDetails) SetS3Key(v string) *UserBucketDetails {
    +	s.S3Key = &v
    +	return s
    +}
    +
    +// Describes the user data for an instance.
    +type UserData struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The user data. If you are using an AWS SDK or command line tool, Base64-encoding
    +	// is performed for you, and you can load the text from a file. Otherwise, you
    +	// must provide Base64-encoded text.
    +	Data *string `locationName:"data" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s UserData) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UserData) GoString() string {
    +	return s.String()
    +}
    +
    +// SetData sets the Data field's value.
    +func (s *UserData) SetData(v string) *UserData {
    +	s.Data = &v
    +	return s
    +}
    +
    +// Describes a security group and AWS account ID pair.
    +type UserIdGroupPair struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ID of the security group.
    +	GroupId *string `locationName:"groupId" type:"string"`
    +
    +	// The name of the security group. In a request, use this parameter for a security
    +	// group in EC2-Classic or a default VPC only. For a security group in a nondefault
    +	// VPC, use the security group ID.
    +	GroupName *string `locationName:"groupName" type:"string"`
    +
    +	// The status of a VPC peering connection, if applicable.
    +	PeeringStatus *string `locationName:"peeringStatus" type:"string"`
    +
    +	// The ID of an AWS account. For a referenced security group in another VPC,
    +	// the account ID of the referenced security group is returned.
    +	//
    +	// [EC2-Classic] Required when adding or removing rules that reference a security
    +	// group in another AWS account.
    +	UserId *string `locationName:"userId" type:"string"`
    +
    +	// The ID of the VPC for the referenced security group, if applicable.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +
    +	// The ID of the VPC peering connection, if applicable.
    +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s UserIdGroupPair) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s UserIdGroupPair) GoString() string {
    +	return s.String()
    +}
    +
    +// SetGroupId sets the GroupId field's value.
    +func (s *UserIdGroupPair) SetGroupId(v string) *UserIdGroupPair {
    +	s.GroupId = &v
    +	return s
    +}
    +
    +// SetGroupName sets the GroupName field's value.
    +func (s *UserIdGroupPair) SetGroupName(v string) *UserIdGroupPair {
    +	s.GroupName = &v
    +	return s
    +}
    +
    +// SetPeeringStatus sets the PeeringStatus field's value.
    +func (s *UserIdGroupPair) SetPeeringStatus(v string) *UserIdGroupPair {
    +	s.PeeringStatus = &v
    +	return s
    +}
    +
    +// SetUserId sets the UserId field's value.
    +func (s *UserIdGroupPair) SetUserId(v string) *UserIdGroupPair {
    +	s.UserId = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *UserIdGroupPair) SetVpcId(v string) *UserIdGroupPair {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *UserIdGroupPair) SetVpcPeeringConnectionId(v string) *UserIdGroupPair {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
    +// Describes telemetry for a VPN tunnel.
    +type VgwTelemetry struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The number of accepted routes.
    +	AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"`
    +
    +	// The date and time of the last change in status.
    +	LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The Internet-routable IP address of the virtual private gateway's outside
    +	// interface.
    +	OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"`
    +
    +	// The status of the VPN tunnel.
    +	Status *string `locationName:"status" type:"string" enum:"TelemetryStatus"`
    +
    +	// If an error occurs, a description of the error.
    +	StatusMessage *string `locationName:"statusMessage" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VgwTelemetry) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VgwTelemetry) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAcceptedRouteCount sets the AcceptedRouteCount field's value.
    +func (s *VgwTelemetry) SetAcceptedRouteCount(v int64) *VgwTelemetry {
    +	s.AcceptedRouteCount = &v
    +	return s
    +}
    +
    +// SetLastStatusChange sets the LastStatusChange field's value.
    +func (s *VgwTelemetry) SetLastStatusChange(v time.Time) *VgwTelemetry {
    +	s.LastStatusChange = &v
    +	return s
    +}
    +
    +// SetOutsideIpAddress sets the OutsideIpAddress field's value.
    +func (s *VgwTelemetry) SetOutsideIpAddress(v string) *VgwTelemetry {
    +	s.OutsideIpAddress = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *VgwTelemetry) SetStatus(v string) *VgwTelemetry {
    +	s.Status = &v
    +	return s
    +}
    +
    +// SetStatusMessage sets the StatusMessage field's value.
    +func (s *VgwTelemetry) SetStatusMessage(v string) *VgwTelemetry {
    +	s.StatusMessage = &v
    +	return s
    +}
    +
    +// Describes a volume.
    +type Volume struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the volume attachments.
    +	Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"`
    +
    +	// The Availability Zone for the volume.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The time stamp when volume creation was initiated.
    +	CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// Indicates whether the volume will be encrypted.
    +	Encrypted *bool `locationName:"encrypted" type:"boolean"`
    +
    +	// The number of I/O operations per second (IOPS) that the volume supports.
    +	// For Provisioned IOPS SSD volumes, this represents the number of IOPS that
    +	// are provisioned for the volume. For General Purpose SSD volumes, this represents
    +	// the baseline performance of the volume and the rate at which the volume accumulates
    +	// I/O credits for bursting. For more information on General Purpose SSD baseline
    +	// performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
    +	// in the Amazon Elastic Compute Cloud User Guide.
    +	//
    +	// Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for
    +	// gp2 volumes.
    +	//
    +	// Condition: This parameter is required for requests to create io1 volumes;
    +	// it is not used in requests to create gp2, st1, sc1, or standard volumes.
    +	Iops *int64 `locationName:"iops" type:"integer"`
    +
    +	// The full ARN of the AWS Key Management Service (AWS KMS) customer master
    +	// key (CMK) that was used to protect the volume encryption key for the volume.
    +	KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
    +
    +	// The size of the volume, in GiBs.
    +	Size *int64 `locationName:"size" type:"integer"`
    +
    +	// The snapshot from which the volume was created, if applicable.
    +	SnapshotId *string `locationName:"snapshotId" type:"string"`
    +
    +	// The volume state.
    +	State *string `locationName:"status" type:"string" enum:"VolumeState"`
    +
    +	// Any tags assigned to the volume.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the volume.
    +	VolumeId *string `locationName:"volumeId" type:"string"`
    +
    +	// The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned
    +	// IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard
    +	// for Magnetic volumes.
    +	VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"`
    +}
    +
    +// String returns the string representation
    +func (s Volume) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Volume) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachments sets the Attachments field's value.
    +func (s *Volume) SetAttachments(v []*VolumeAttachment) *Volume {
    +	s.Attachments = v
    +	return s
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *Volume) SetAvailabilityZone(v string) *Volume {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetCreateTime sets the CreateTime field's value.
    +func (s *Volume) SetCreateTime(v time.Time) *Volume {
    +	s.CreateTime = &v
    +	return s
    +}
    +
    +// SetEncrypted sets the Encrypted field's value.
    +func (s *Volume) SetEncrypted(v bool) *Volume {
    +	s.Encrypted = &v
    +	return s
    +}
    +
    +// SetIops sets the Iops field's value.
    +func (s *Volume) SetIops(v int64) *Volume {
    +	s.Iops = &v
    +	return s
    +}
    +
    +// SetKmsKeyId sets the KmsKeyId field's value.
    +func (s *Volume) SetKmsKeyId(v string) *Volume {
    +	s.KmsKeyId = &v
    +	return s
    +}
    +
    +// SetSize sets the Size field's value.
    +func (s *Volume) SetSize(v int64) *Volume {
    +	s.Size = &v
    +	return s
    +}
    +
    +// SetSnapshotId sets the SnapshotId field's value.
    +func (s *Volume) SetSnapshotId(v string) *Volume {
    +	s.SnapshotId = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Volume) SetState(v string) *Volume {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *Volume) SetTags(v []*Tag) *Volume {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *Volume) SetVolumeId(v string) *Volume {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// SetVolumeType sets the VolumeType field's value.
    +func (s *Volume) SetVolumeType(v string) *Volume {
    +	s.VolumeType = &v
    +	return s
    +}
    +
    +// Describes volume attachment details.
    +type VolumeAttachment struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The time stamp when the attachment initiated.
    +	AttachTime *time.Time `locationName:"attachTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// Indicates whether the EBS volume is deleted on instance termination.
    +	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
    +
    +	// The device name.
    +	Device *string `locationName:"device" type:"string"`
    +
    +	// The ID of the instance.
    +	InstanceId *string `locationName:"instanceId" type:"string"`
    +
    +	// The attachment state of the volume.
    +	State *string `locationName:"status" type:"string" enum:"VolumeAttachmentState"`
    +
    +	// The ID of the volume.
    +	VolumeId *string `locationName:"volumeId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VolumeAttachment) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VolumeAttachment) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAttachTime sets the AttachTime field's value.
    +func (s *VolumeAttachment) SetAttachTime(v time.Time) *VolumeAttachment {
    +	s.AttachTime = &v
    +	return s
    +}
    +
    +// SetDeleteOnTermination sets the DeleteOnTermination field's value.
    +func (s *VolumeAttachment) SetDeleteOnTermination(v bool) *VolumeAttachment {
    +	s.DeleteOnTermination = &v
    +	return s
    +}
    +
    +// SetDevice sets the Device field's value.
    +func (s *VolumeAttachment) SetDevice(v string) *VolumeAttachment {
    +	s.Device = &v
    +	return s
    +}
    +
    +// SetInstanceId sets the InstanceId field's value.
    +func (s *VolumeAttachment) SetInstanceId(v string) *VolumeAttachment {
    +	s.InstanceId = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *VolumeAttachment) SetState(v string) *VolumeAttachment {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *VolumeAttachment) SetVolumeId(v string) *VolumeAttachment {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// Describes an EBS volume.
    +type VolumeDetail struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The size of the volume, in GiB.
    +	//
    +	// Size is a required field
    +	Size *int64 `locationName:"size" type:"long" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s VolumeDetail) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VolumeDetail) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *VolumeDetail) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "VolumeDetail"}
    +	if s.Size == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Size"))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetSize sets the Size field's value.
    +func (s *VolumeDetail) SetSize(v int64) *VolumeDetail {
    +	s.Size = &v
    +	return s
    +}
    +
    +// Describes a volume status operation code.
    +type VolumeStatusAction struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The code identifying the operation, for example, enable-volume-io.
    +	Code *string `locationName:"code" type:"string"`
    +
    +	// A description of the operation.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The ID of the event associated with this operation.
    +	EventId *string `locationName:"eventId" type:"string"`
    +
    +	// The event type associated with this operation.
    +	EventType *string `locationName:"eventType" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VolumeStatusAction) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VolumeStatusAction) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *VolumeStatusAction) SetCode(v string) *VolumeStatusAction {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *VolumeStatusAction) SetDescription(v string) *VolumeStatusAction {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetEventId sets the EventId field's value.
    +func (s *VolumeStatusAction) SetEventId(v string) *VolumeStatusAction {
    +	s.EventId = &v
    +	return s
    +}
    +
    +// SetEventType sets the EventType field's value.
    +func (s *VolumeStatusAction) SetEventType(v string) *VolumeStatusAction {
    +	s.EventType = &v
    +	return s
    +}
    +
    +// Describes a volume status.
    +type VolumeStatusDetails struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The name of the volume status.
    +	Name *string `locationName:"name" type:"string" enum:"VolumeStatusName"`
    +
    +	// The intended status of the volume status.
    +	Status *string `locationName:"status" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VolumeStatusDetails) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VolumeStatusDetails) GoString() string {
    +	return s.String()
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *VolumeStatusDetails) SetName(v string) *VolumeStatusDetails {
    +	s.Name = &v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *VolumeStatusDetails) SetStatus(v string) *VolumeStatusDetails {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes a volume status event.
    +type VolumeStatusEvent struct {
    +	_ struct{} `type:"structure"`
    +
    +	// A description of the event.
    +	Description *string `locationName:"description" type:"string"`
    +
    +	// The ID of this event.
    +	EventId *string `locationName:"eventId" type:"string"`
    +
    +	// The type of this event.
    +	EventType *string `locationName:"eventType" type:"string"`
    +
    +	// The latest end time of the event.
    +	NotAfter *time.Time `locationName:"notAfter" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The earliest start time of the event.
    +	NotBefore *time.Time `locationName:"notBefore" type:"timestamp" timestampFormat:"iso8601"`
    +}
    +
    +// String returns the string representation
    +func (s VolumeStatusEvent) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VolumeStatusEvent) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDescription sets the Description field's value.
    +func (s *VolumeStatusEvent) SetDescription(v string) *VolumeStatusEvent {
    +	s.Description = &v
    +	return s
    +}
    +
    +// SetEventId sets the EventId field's value.
    +func (s *VolumeStatusEvent) SetEventId(v string) *VolumeStatusEvent {
    +	s.EventId = &v
    +	return s
    +}
    +
    +// SetEventType sets the EventType field's value.
    +func (s *VolumeStatusEvent) SetEventType(v string) *VolumeStatusEvent {
    +	s.EventType = &v
    +	return s
    +}
    +
    +// SetNotAfter sets the NotAfter field's value.
    +func (s *VolumeStatusEvent) SetNotAfter(v time.Time) *VolumeStatusEvent {
    +	s.NotAfter = &v
    +	return s
    +}
    +
    +// SetNotBefore sets the NotBefore field's value.
    +func (s *VolumeStatusEvent) SetNotBefore(v time.Time) *VolumeStatusEvent {
    +	s.NotBefore = &v
    +	return s
    +}
    +
    +// Describes the status of a volume.
    +type VolumeStatusInfo struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The details of the volume status.
    +	Details []*VolumeStatusDetails `locationName:"details" locationNameList:"item" type:"list"`
    +
    +	// The status of the volume.
    +	Status *string `locationName:"status" type:"string" enum:"VolumeStatusInfoStatus"`
    +}
    +
    +// String returns the string representation
    +func (s VolumeStatusInfo) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VolumeStatusInfo) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDetails sets the Details field's value.
    +func (s *VolumeStatusInfo) SetDetails(v []*VolumeStatusDetails) *VolumeStatusInfo {
    +	s.Details = v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *VolumeStatusInfo) SetStatus(v string) *VolumeStatusInfo {
    +	s.Status = &v
    +	return s
    +}
    +
    +// Describes the volume status.
    +type VolumeStatusItem struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The details of the operation.
    +	Actions []*VolumeStatusAction `locationName:"actionsSet" locationNameList:"item" type:"list"`
    +
    +	// The Availability Zone of the volume.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// A list of events associated with the volume.
    +	Events []*VolumeStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"`
    +
    +	// The volume ID.
    +	VolumeId *string `locationName:"volumeId" type:"string"`
    +
    +	// The volume status.
    +	VolumeStatus *VolumeStatusInfo `locationName:"volumeStatus" type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s VolumeStatusItem) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VolumeStatusItem) GoString() string {
    +	return s.String()
    +}
    +
    +// SetActions sets the Actions field's value.
    +func (s *VolumeStatusItem) SetActions(v []*VolumeStatusAction) *VolumeStatusItem {
    +	s.Actions = v
    +	return s
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *VolumeStatusItem) SetAvailabilityZone(v string) *VolumeStatusItem {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetEvents sets the Events field's value.
    +func (s *VolumeStatusItem) SetEvents(v []*VolumeStatusEvent) *VolumeStatusItem {
    +	s.Events = v
    +	return s
    +}
    +
    +// SetVolumeId sets the VolumeId field's value.
    +func (s *VolumeStatusItem) SetVolumeId(v string) *VolumeStatusItem {
    +	s.VolumeId = &v
    +	return s
    +}
    +
    +// SetVolumeStatus sets the VolumeStatus field's value.
    +func (s *VolumeStatusItem) SetVolumeStatus(v *VolumeStatusInfo) *VolumeStatusItem {
    +	s.VolumeStatus = v
    +	return s
    +}
    +
    +// Describes a VPC.
    +type Vpc struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR block for the VPC.
    +	CidrBlock *string `locationName:"cidrBlock" type:"string"`
    +
    +	// The ID of the set of DHCP options you've associated with the VPC (or default
    +	// if the default options are associated with the VPC).
    +	DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"`
    +
    +	// The allowed tenancy of instances launched into the VPC.
    +	InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"`
    +
    +	// Indicates whether the VPC is the default VPC.
    +	IsDefault *bool `locationName:"isDefault" type:"boolean"`
    +
    +	// The current state of the VPC.
    +	State *string `locationName:"state" type:"string" enum:"VpcState"`
    +
    +	// Any tags assigned to the VPC.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s Vpc) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Vpc) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCidrBlock sets the CidrBlock field's value.
    +func (s *Vpc) SetCidrBlock(v string) *Vpc {
    +	s.CidrBlock = &v
    +	return s
    +}
    +
    +// SetDhcpOptionsId sets the DhcpOptionsId field's value.
    +func (s *Vpc) SetDhcpOptionsId(v string) *Vpc {
    +	s.DhcpOptionsId = &v
    +	return s
    +}
    +
    +// SetInstanceTenancy sets the InstanceTenancy field's value.
    +func (s *Vpc) SetInstanceTenancy(v string) *Vpc {
    +	s.InstanceTenancy = &v
    +	return s
    +}
    +
    +// SetIsDefault sets the IsDefault field's value.
    +func (s *Vpc) SetIsDefault(v bool) *Vpc {
    +	s.IsDefault = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *Vpc) SetState(v string) *Vpc {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *Vpc) SetTags(v []*Tag) *Vpc {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *Vpc) SetVpcId(v string) *Vpc {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes an attachment between a virtual private gateway and a VPC.
    +type VpcAttachment struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The current state of the attachment.
    +	State *string `locationName:"state" type:"string" enum:"AttachmentStatus"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpcAttachment) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpcAttachment) GoString() string {
    +	return s.String()
    +}
    +
    +// SetState sets the State field's value.
    +func (s *VpcAttachment) SetState(v string) *VpcAttachment {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *VpcAttachment) SetVpcId(v string) *VpcAttachment {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes whether a VPC is enabled for ClassicLink.
    +type VpcClassicLink struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the VPC is enabled for ClassicLink.
    +	ClassicLinkEnabled *bool `locationName:"classicLinkEnabled" type:"boolean"`
    +
    +	// Any tags assigned to the VPC.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpcClassicLink) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpcClassicLink) GoString() string {
    +	return s.String()
    +}
    +
    +// SetClassicLinkEnabled sets the ClassicLinkEnabled field's value.
    +func (s *VpcClassicLink) SetClassicLinkEnabled(v bool) *VpcClassicLink {
    +	s.ClassicLinkEnabled = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *VpcClassicLink) SetTags(v []*Tag) *VpcClassicLink {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *VpcClassicLink) SetVpcId(v string) *VpcClassicLink {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a VPC endpoint.
    +type VpcEndpoint struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The date and time the VPC endpoint was created.
    +	CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// The policy document associated with the endpoint.
    +	PolicyDocument *string `locationName:"policyDocument" type:"string"`
    +
    +	// One or more route tables associated with the endpoint.
    +	RouteTableIds []*string `locationName:"routeTableIdSet" locationNameList:"item" type:"list"`
    +
    +	// The name of the AWS service to which the endpoint is associated.
    +	ServiceName *string `locationName:"serviceName" type:"string"`
    +
    +	// The state of the VPC endpoint.
    +	State *string `locationName:"state" type:"string" enum:"State"`
    +
    +	// The ID of the VPC endpoint.
    +	VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"`
    +
    +	// The ID of the VPC to which the endpoint is associated.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpcEndpoint) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpcEndpoint) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCreationTimestamp sets the CreationTimestamp field's value.
    +func (s *VpcEndpoint) SetCreationTimestamp(v time.Time) *VpcEndpoint {
    +	s.CreationTimestamp = &v
    +	return s
    +}
    +
    +// SetPolicyDocument sets the PolicyDocument field's value.
    +func (s *VpcEndpoint) SetPolicyDocument(v string) *VpcEndpoint {
    +	s.PolicyDocument = &v
    +	return s
    +}
    +
    +// SetRouteTableIds sets the RouteTableIds field's value.
    +func (s *VpcEndpoint) SetRouteTableIds(v []*string) *VpcEndpoint {
    +	s.RouteTableIds = v
    +	return s
    +}
    +
    +// SetServiceName sets the ServiceName field's value.
    +func (s *VpcEndpoint) SetServiceName(v string) *VpcEndpoint {
    +	s.ServiceName = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *VpcEndpoint) SetState(v string) *VpcEndpoint {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetVpcEndpointId sets the VpcEndpointId field's value.
    +func (s *VpcEndpoint) SetVpcEndpointId(v string) *VpcEndpoint {
    +	s.VpcEndpointId = &v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *VpcEndpoint) SetVpcId(v string) *VpcEndpoint {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a VPC peering connection.
    +type VpcPeeringConnection struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Information about the accepter VPC. CIDR block information is not returned
    +	// when creating a VPC peering connection, or when describing a VPC peering
    +	// connection that's in the initiating-request or pending-acceptance state.
    +	AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"`
    +
    +	// The time that an unaccepted VPC peering connection will expire.
    +	ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"`
    +
    +	// Information about the requester VPC.
    +	RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"`
    +
    +	// The status of the VPC peering connection.
    +	Status *VpcPeeringConnectionStateReason `locationName:"status" type:"structure"`
    +
    +	// Any tags assigned to the resource.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPC peering connection.
    +	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpcPeeringConnection) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpcPeeringConnection) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAccepterVpcInfo sets the AccepterVpcInfo field's value.
    +func (s *VpcPeeringConnection) SetAccepterVpcInfo(v *VpcPeeringConnectionVpcInfo) *VpcPeeringConnection {
    +	s.AccepterVpcInfo = v
    +	return s
    +}
    +
    +// SetExpirationTime sets the ExpirationTime field's value.
    +func (s *VpcPeeringConnection) SetExpirationTime(v time.Time) *VpcPeeringConnection {
    +	s.ExpirationTime = &v
    +	return s
    +}
    +
    +// SetRequesterVpcInfo sets the RequesterVpcInfo field's value.
    +func (s *VpcPeeringConnection) SetRequesterVpcInfo(v *VpcPeeringConnectionVpcInfo) *VpcPeeringConnection {
    +	s.RequesterVpcInfo = v
    +	return s
    +}
    +
    +// SetStatus sets the Status field's value.
    +func (s *VpcPeeringConnection) SetStatus(v *VpcPeeringConnectionStateReason) *VpcPeeringConnection {
    +	s.Status = v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *VpcPeeringConnection) SetTags(v []*Tag) *VpcPeeringConnection {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
    +func (s *VpcPeeringConnection) SetVpcPeeringConnectionId(v string) *VpcPeeringConnection {
    +	s.VpcPeeringConnectionId = &v
    +	return s
    +}
    +
    +// Describes the VPC peering connection options.
    +type VpcPeeringConnectionOptionsDescription struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether a local VPC can resolve public DNS hostnames to private
    +	// IP addresses when queried from instances in a peer VPC.
    +	AllowDnsResolutionFromRemoteVpc *bool `locationName:"allowDnsResolutionFromRemoteVpc" type:"boolean"`
    +
    +	// Indicates whether a local ClassicLink connection can communicate with the
    +	// peer VPC over the VPC peering connection.
    +	AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"`
    +
    +	// Indicates whether a local VPC can communicate with a ClassicLink connection
    +	// in the peer VPC over the VPC peering connection.
    +	AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s VpcPeeringConnectionOptionsDescription) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpcPeeringConnectionOptionsDescription) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAllowDnsResolutionFromRemoteVpc sets the AllowDnsResolutionFromRemoteVpc field's value.
    +func (s *VpcPeeringConnectionOptionsDescription) SetAllowDnsResolutionFromRemoteVpc(v bool) *VpcPeeringConnectionOptionsDescription {
    +	s.AllowDnsResolutionFromRemoteVpc = &v
    +	return s
    +}
    +
    +// SetAllowEgressFromLocalClassicLinkToRemoteVpc sets the AllowEgressFromLocalClassicLinkToRemoteVpc field's value.
    +func (s *VpcPeeringConnectionOptionsDescription) SetAllowEgressFromLocalClassicLinkToRemoteVpc(v bool) *VpcPeeringConnectionOptionsDescription {
    +	s.AllowEgressFromLocalClassicLinkToRemoteVpc = &v
    +	return s
    +}
    +
    +// SetAllowEgressFromLocalVpcToRemoteClassicLink sets the AllowEgressFromLocalVpcToRemoteClassicLink field's value.
    +func (s *VpcPeeringConnectionOptionsDescription) SetAllowEgressFromLocalVpcToRemoteClassicLink(v bool) *VpcPeeringConnectionOptionsDescription {
    +	s.AllowEgressFromLocalVpcToRemoteClassicLink = &v
    +	return s
    +}
    +
    +// Describes the status of a VPC peering connection.
    +type VpcPeeringConnectionStateReason struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The status of the VPC peering connection.
    +	Code *string `locationName:"code" type:"string" enum:"VpcPeeringConnectionStateReasonCode"`
    +
    +	// A message that provides more information about the status, if applicable.
    +	Message *string `locationName:"message" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpcPeeringConnectionStateReason) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpcPeeringConnectionStateReason) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCode sets the Code field's value.
    +func (s *VpcPeeringConnectionStateReason) SetCode(v string) *VpcPeeringConnectionStateReason {
    +	s.Code = &v
    +	return s
    +}
    +
    +// SetMessage sets the Message field's value.
    +func (s *VpcPeeringConnectionStateReason) SetMessage(v string) *VpcPeeringConnectionStateReason {
    +	s.Message = &v
    +	return s
    +}
    +
    +// Describes a VPC in a VPC peering connection.
    +type VpcPeeringConnectionVpcInfo struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR block for the VPC.
    +	CidrBlock *string `locationName:"cidrBlock" type:"string"`
    +
    +	// The AWS account ID of the VPC owner.
    +	OwnerId *string `locationName:"ownerId" type:"string"`
    +
    +	// Information about the VPC peering connection options for the accepter or
    +	// requester VPC.
    +	PeeringOptions *VpcPeeringConnectionOptionsDescription `locationName:"peeringOptions" type:"structure"`
    +
    +	// The ID of the VPC.
    +	VpcId *string `locationName:"vpcId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpcPeeringConnectionVpcInfo) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpcPeeringConnectionVpcInfo) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCidrBlock sets the CidrBlock field's value.
    +func (s *VpcPeeringConnectionVpcInfo) SetCidrBlock(v string) *VpcPeeringConnectionVpcInfo {
    +	s.CidrBlock = &v
    +	return s
    +}
    +
    +// SetOwnerId sets the OwnerId field's value.
    +func (s *VpcPeeringConnectionVpcInfo) SetOwnerId(v string) *VpcPeeringConnectionVpcInfo {
    +	s.OwnerId = &v
    +	return s
    +}
    +
    +// SetPeeringOptions sets the PeeringOptions field's value.
    +func (s *VpcPeeringConnectionVpcInfo) SetPeeringOptions(v *VpcPeeringConnectionOptionsDescription) *VpcPeeringConnectionVpcInfo {
    +	s.PeeringOptions = v
    +	return s
    +}
    +
    +// SetVpcId sets the VpcId field's value.
    +func (s *VpcPeeringConnectionVpcInfo) SetVpcId(v string) *VpcPeeringConnectionVpcInfo {
    +	s.VpcId = &v
    +	return s
    +}
    +
    +// Describes a VPN connection.
    +type VpnConnection struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The configuration information for the VPN connection's customer gateway (in
    +	// the native XML format). This element is always present in the CreateVpnConnection
    +	// response; however, it's present in the DescribeVpnConnections response only
    +	// if the VPN connection is in the pending or available state.
    +	CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string"`
    +
    +	// The ID of the customer gateway at your end of the VPN connection.
    +	CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"`
    +
    +	// The VPN connection options.
    +	Options *VpnConnectionOptions `locationName:"options" type:"structure"`
    +
    +	// The static routes associated with the VPN connection.
    +	Routes []*VpnStaticRoute `locationName:"routes" locationNameList:"item" type:"list"`
    +
    +	// The current state of the VPN connection.
    +	State *string `locationName:"state" type:"string" enum:"VpnState"`
    +
    +	// Any tags assigned to the VPN connection.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The type of VPN connection.
    +	Type *string `locationName:"type" type:"string" enum:"GatewayType"`
    +
    +	// Information about the VPN tunnel.
    +	VgwTelemetry []*VgwTelemetry `locationName:"vgwTelemetry" locationNameList:"item" type:"list"`
    +
    +	// The ID of the VPN connection.
    +	VpnConnectionId *string `locationName:"vpnConnectionId" type:"string"`
    +
    +	// The ID of the virtual private gateway at the AWS side of the VPN connection.
    +	VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpnConnection) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpnConnection) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCustomerGatewayConfiguration sets the CustomerGatewayConfiguration field's value.
    +func (s *VpnConnection) SetCustomerGatewayConfiguration(v string) *VpnConnection {
    +	s.CustomerGatewayConfiguration = &v
    +	return s
    +}
    +
    +// SetCustomerGatewayId sets the CustomerGatewayId field's value.
    +func (s *VpnConnection) SetCustomerGatewayId(v string) *VpnConnection {
    +	s.CustomerGatewayId = &v
    +	return s
    +}
    +
    +// SetOptions sets the Options field's value.
    +func (s *VpnConnection) SetOptions(v *VpnConnectionOptions) *VpnConnection {
    +	s.Options = v
    +	return s
    +}
    +
    +// SetRoutes sets the Routes field's value.
    +func (s *VpnConnection) SetRoutes(v []*VpnStaticRoute) *VpnConnection {
    +	s.Routes = v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *VpnConnection) SetState(v string) *VpnConnection {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *VpnConnection) SetTags(v []*Tag) *VpnConnection {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetType sets the Type field's value.
    +func (s *VpnConnection) SetType(v string) *VpnConnection {
    +	s.Type = &v
    +	return s
    +}
    +
    +// SetVgwTelemetry sets the VgwTelemetry field's value.
    +func (s *VpnConnection) SetVgwTelemetry(v []*VgwTelemetry) *VpnConnection {
    +	s.VgwTelemetry = v
    +	return s
    +}
    +
    +// SetVpnConnectionId sets the VpnConnectionId field's value.
    +func (s *VpnConnection) SetVpnConnectionId(v string) *VpnConnection {
    +	s.VpnConnectionId = &v
    +	return s
    +}
    +
    +// SetVpnGatewayId sets the VpnGatewayId field's value.
    +func (s *VpnConnection) SetVpnGatewayId(v string) *VpnConnection {
    +	s.VpnGatewayId = &v
    +	return s
    +}
    +
    +// Describes VPN connection options.
    +type VpnConnectionOptions struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the VPN connection uses static routes only. Static routes
    +	// must be used for devices that don't support BGP.
    +	StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s VpnConnectionOptions) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpnConnectionOptions) GoString() string {
    +	return s.String()
    +}
    +
    +// SetStaticRoutesOnly sets the StaticRoutesOnly field's value.
    +func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions {
    +	s.StaticRoutesOnly = &v
    +	return s
    +}
    +
    +// Describes VPN connection options.
    +type VpnConnectionOptionsSpecification struct {
    +	_ struct{} `type:"structure"`
    +
    +	// Indicates whether the VPN connection uses static routes only. Static routes
    +	// must be used for devices that don't support BGP.
    +	StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"`
    +}
    +
    +// String returns the string representation
    +func (s VpnConnectionOptionsSpecification) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpnConnectionOptionsSpecification) GoString() string {
    +	return s.String()
    +}
    +
    +// SetStaticRoutesOnly sets the StaticRoutesOnly field's value.
    +func (s *VpnConnectionOptionsSpecification) SetStaticRoutesOnly(v bool) *VpnConnectionOptionsSpecification {
    +	s.StaticRoutesOnly = &v
    +	return s
    +}
    +
    +// Describes a virtual private gateway.
    +type VpnGateway struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The Availability Zone where the virtual private gateway was created, if applicable.
    +	// This field may be empty or not returned.
    +	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
    +
    +	// The current state of the virtual private gateway.
    +	State *string `locationName:"state" type:"string" enum:"VpnState"`
    +
    +	// Any tags assigned to the virtual private gateway.
    +	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
    +
    +	// The type of VPN connection the virtual private gateway supports.
    +	Type *string `locationName:"type" type:"string" enum:"GatewayType"`
    +
    +	// Any VPCs attached to the virtual private gateway.
    +	VpcAttachments []*VpcAttachment `locationName:"attachments" locationNameList:"item" type:"list"`
    +
    +	// The ID of the virtual private gateway.
    +	VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s VpnGateway) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpnGateway) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAvailabilityZone sets the AvailabilityZone field's value.
    +func (s *VpnGateway) SetAvailabilityZone(v string) *VpnGateway {
    +	s.AvailabilityZone = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *VpnGateway) SetState(v string) *VpnGateway {
    +	s.State = &v
    +	return s
    +}
    +
    +// SetTags sets the Tags field's value.
    +func (s *VpnGateway) SetTags(v []*Tag) *VpnGateway {
    +	s.Tags = v
    +	return s
    +}
    +
    +// SetType sets the Type field's value.
    +func (s *VpnGateway) SetType(v string) *VpnGateway {
    +	s.Type = &v
    +	return s
    +}
    +
    +// SetVpcAttachments sets the VpcAttachments field's value.
    +func (s *VpnGateway) SetVpcAttachments(v []*VpcAttachment) *VpnGateway {
    +	s.VpcAttachments = v
    +	return s
    +}
    +
    +// SetVpnGatewayId sets the VpnGatewayId field's value.
    +func (s *VpnGateway) SetVpnGatewayId(v string) *VpnGateway {
    +	s.VpnGatewayId = &v
    +	return s
    +}
    +
    +// Describes a static route for a VPN connection.
    +type VpnStaticRoute struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The CIDR block associated with the local subnet of the customer data center.
    +	DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"`
    +
    +	// Indicates how the routes were provided.
    +	Source *string `locationName:"source" type:"string" enum:"VpnStaticRouteSource"`
    +
    +	// The current state of the static route.
    +	State *string `locationName:"state" type:"string" enum:"VpnState"`
    +}
    +
    +// String returns the string representation
    +func (s VpnStaticRoute) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s VpnStaticRoute) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
    +func (s *VpnStaticRoute) SetDestinationCidrBlock(v string) *VpnStaticRoute {
    +	s.DestinationCidrBlock = &v
    +	return s
    +}
    +
    +// SetSource sets the Source field's value.
    +func (s *VpnStaticRoute) SetSource(v string) *VpnStaticRoute {
    +	s.Source = &v
    +	return s
    +}
    +
    +// SetState sets the State field's value.
    +func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute {
    +	s.State = &v
    +	return s
    +}
    +
    +const (
    +	// AccountAttributeNameSupportedPlatforms is a AccountAttributeName enum value
    +	AccountAttributeNameSupportedPlatforms = "supported-platforms"
    +
    +	// AccountAttributeNameDefaultVpc is a AccountAttributeName enum value
    +	AccountAttributeNameDefaultVpc = "default-vpc"
    +)
    +
    +const (
    +	// ActivityStatusError is a ActivityStatus enum value
    +	ActivityStatusError = "error"
    +
    +	// ActivityStatusPendingFulfillment is a ActivityStatus enum value
    +	ActivityStatusPendingFulfillment = "pending_fulfillment"
    +
    +	// ActivityStatusPendingTermination is a ActivityStatus enum value
    +	ActivityStatusPendingTermination = "pending_termination"
    +
    +	// ActivityStatusFulfilled is a ActivityStatus enum value
    +	ActivityStatusFulfilled = "fulfilled"
    +)
    +
    +const (
    +	// AffinityDefault is a Affinity enum value
    +	AffinityDefault = "default"
    +
    +	// AffinityHost is a Affinity enum value
    +	AffinityHost = "host"
    +)
    +
    +const (
    +	// AllocationStateAvailable is a AllocationState enum value
    +	AllocationStateAvailable = "available"
    +
    +	// AllocationStateUnderAssessment is a AllocationState enum value
    +	AllocationStateUnderAssessment = "under-assessment"
    +
    +	// AllocationStatePermanentFailure is a AllocationState enum value
    +	AllocationStatePermanentFailure = "permanent-failure"
    +
    +	// AllocationStateReleased is a AllocationState enum value
    +	AllocationStateReleased = "released"
    +
    +	// AllocationStateReleasedPermanentFailure is a AllocationState enum value
    +	AllocationStateReleasedPermanentFailure = "released-permanent-failure"
    +)
    +
    +const (
    +	// AllocationStrategyLowestPrice is a AllocationStrategy enum value
    +	AllocationStrategyLowestPrice = "lowestPrice"
    +
    +	// AllocationStrategyDiversified is a AllocationStrategy enum value
    +	AllocationStrategyDiversified = "diversified"
    +)
    +
    +const (
    +	// ArchitectureValuesI386 is a ArchitectureValues enum value
    +	ArchitectureValuesI386 = "i386"
    +
    +	// ArchitectureValuesX8664 is a ArchitectureValues enum value
    +	ArchitectureValuesX8664 = "x86_64"
    +)
    +
    +const (
    +	// AttachmentStatusAttaching is a AttachmentStatus enum value
    +	AttachmentStatusAttaching = "attaching"
    +
    +	// AttachmentStatusAttached is a AttachmentStatus enum value
    +	AttachmentStatusAttached = "attached"
    +
    +	// AttachmentStatusDetaching is a AttachmentStatus enum value
    +	AttachmentStatusDetaching = "detaching"
    +
    +	// AttachmentStatusDetached is a AttachmentStatus enum value
    +	AttachmentStatusDetached = "detached"
    +)
    +
    +const (
    +	// AutoPlacementOn is a AutoPlacement enum value
    +	AutoPlacementOn = "on"
    +
    +	// AutoPlacementOff is a AutoPlacement enum value
    +	AutoPlacementOff = "off"
    +)
    +
    +const (
    +	// AvailabilityZoneStateAvailable is a AvailabilityZoneState enum value
    +	AvailabilityZoneStateAvailable = "available"
    +
    +	// AvailabilityZoneStateInformation is a AvailabilityZoneState enum value
    +	AvailabilityZoneStateInformation = "information"
    +
    +	// AvailabilityZoneStateImpaired is a AvailabilityZoneState enum value
    +	AvailabilityZoneStateImpaired = "impaired"
    +
    +	// AvailabilityZoneStateUnavailable is a AvailabilityZoneState enum value
    +	AvailabilityZoneStateUnavailable = "unavailable"
    +)
    +
    +const (
    +	// BatchStateSubmitted is a BatchState enum value
    +	BatchStateSubmitted = "submitted"
    +
    +	// BatchStateActive is a BatchState enum value
    +	BatchStateActive = "active"
    +
    +	// BatchStateCancelled is a BatchState enum value
    +	BatchStateCancelled = "cancelled"
    +
    +	// BatchStateFailed is a BatchState enum value
    +	BatchStateFailed = "failed"
    +
    +	// BatchStateCancelledRunning is a BatchState enum value
    +	BatchStateCancelledRunning = "cancelled_running"
    +
    +	// BatchStateCancelledTerminating is a BatchState enum value
    +	BatchStateCancelledTerminating = "cancelled_terminating"
    +
    +	// BatchStateModifying is a BatchState enum value
    +	BatchStateModifying = "modifying"
    +)
    +
    +const (
    +	// BundleTaskStatePending is a BundleTaskState enum value
    +	BundleTaskStatePending = "pending"
    +
    +	// BundleTaskStateWaitingForShutdown is a BundleTaskState enum value
    +	BundleTaskStateWaitingForShutdown = "waiting-for-shutdown"
    +
    +	// BundleTaskStateBundling is a BundleTaskState enum value
    +	BundleTaskStateBundling = "bundling"
    +
    +	// BundleTaskStateStoring is a BundleTaskState enum value
    +	BundleTaskStateStoring = "storing"
    +
    +	// BundleTaskStateCancelling is a BundleTaskState enum value
    +	BundleTaskStateCancelling = "cancelling"
    +
    +	// BundleTaskStateComplete is a BundleTaskState enum value
    +	BundleTaskStateComplete = "complete"
    +
    +	// BundleTaskStateFailed is a BundleTaskState enum value
    +	BundleTaskStateFailed = "failed"
    +)
    +
    +const (
    +	// CancelBatchErrorCodeFleetRequestIdDoesNotExist is a CancelBatchErrorCode enum value
    +	CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist"
    +
    +	// CancelBatchErrorCodeFleetRequestIdMalformed is a CancelBatchErrorCode enum value
    +	CancelBatchErrorCodeFleetRequestIdMalformed = "fleetRequestIdMalformed"
    +
    +	// CancelBatchErrorCodeFleetRequestNotInCancellableState is a CancelBatchErrorCode enum value
    +	CancelBatchErrorCodeFleetRequestNotInCancellableState = "fleetRequestNotInCancellableState"
    +
    +	// CancelBatchErrorCodeUnexpectedError is a CancelBatchErrorCode enum value
    +	CancelBatchErrorCodeUnexpectedError = "unexpectedError"
    +)
    +
    +const (
    +	// CancelSpotInstanceRequestStateActive is a CancelSpotInstanceRequestState enum value
    +	CancelSpotInstanceRequestStateActive = "active"
    +
    +	// CancelSpotInstanceRequestStateOpen is a CancelSpotInstanceRequestState enum value
    +	CancelSpotInstanceRequestStateOpen = "open"
    +
    +	// CancelSpotInstanceRequestStateClosed is a CancelSpotInstanceRequestState enum value
    +	CancelSpotInstanceRequestStateClosed = "closed"
    +
    +	// CancelSpotInstanceRequestStateCancelled is a CancelSpotInstanceRequestState enum value
    +	CancelSpotInstanceRequestStateCancelled = "cancelled"
    +
    +	// CancelSpotInstanceRequestStateCompleted is a CancelSpotInstanceRequestState enum value
    +	CancelSpotInstanceRequestStateCompleted = "completed"
    +)
    +
    +const (
    +	// ContainerFormatOva is a ContainerFormat enum value
    +	ContainerFormatOva = "ova"
    +)
    +
    +const (
    +	// ConversionTaskStateActive is a ConversionTaskState enum value
    +	ConversionTaskStateActive = "active"
    +
    +	// ConversionTaskStateCancelling is a ConversionTaskState enum value
    +	ConversionTaskStateCancelling = "cancelling"
    +
    +	// ConversionTaskStateCancelled is a ConversionTaskState enum value
    +	ConversionTaskStateCancelled = "cancelled"
    +
    +	// ConversionTaskStateCompleted is a ConversionTaskState enum value
    +	ConversionTaskStateCompleted = "completed"
    +)
    +
    +const (
    +	// CurrencyCodeValuesUsd is a CurrencyCodeValues enum value
    +	CurrencyCodeValuesUsd = "USD"
    +)
    +
    +const (
    +	// DatafeedSubscriptionStateActive is a DatafeedSubscriptionState enum value
    +	DatafeedSubscriptionStateActive = "Active"
    +
    +	// DatafeedSubscriptionStateInactive is a DatafeedSubscriptionState enum value
    +	DatafeedSubscriptionStateInactive = "Inactive"
    +)
    +
    +const (
    +	// DeviceTypeEbs is a DeviceType enum value
    +	DeviceTypeEbs = "ebs"
    +
    +	// DeviceTypeInstanceStore is a DeviceType enum value
    +	DeviceTypeInstanceStore = "instance-store"
    +)
    +
    +const (
    +	// DiskImageFormatVmdk is a DiskImageFormat enum value
    +	DiskImageFormatVmdk = "VMDK"
    +
    +	// DiskImageFormatRaw is a DiskImageFormat enum value
    +	DiskImageFormatRaw = "RAW"
    +
    +	// DiskImageFormatVhd is a DiskImageFormat enum value
    +	DiskImageFormatVhd = "VHD"
    +)
    +
    +const (
    +	// DomainTypeVpc is a DomainType enum value
    +	DomainTypeVpc = "vpc"
    +
    +	// DomainTypeStandard is a DomainType enum value
    +	DomainTypeStandard = "standard"
    +)
    +
    +const (
    +	// EventCodeInstanceReboot is a EventCode enum value
    +	EventCodeInstanceReboot = "instance-reboot"
    +
    +	// EventCodeSystemReboot is a EventCode enum value
    +	EventCodeSystemReboot = "system-reboot"
    +
    +	// EventCodeSystemMaintenance is a EventCode enum value
    +	EventCodeSystemMaintenance = "system-maintenance"
    +
    +	// EventCodeInstanceRetirement is a EventCode enum value
    +	EventCodeInstanceRetirement = "instance-retirement"
    +
    +	// EventCodeInstanceStop is a EventCode enum value
    +	EventCodeInstanceStop = "instance-stop"
    +)
    +
    +const (
    +	// EventTypeInstanceChange is a EventType enum value
    +	EventTypeInstanceChange = "instanceChange"
    +
    +	// EventTypeFleetRequestChange is a EventType enum value
    +	EventTypeFleetRequestChange = "fleetRequestChange"
    +
    +	// EventTypeError is a EventType enum value
    +	EventTypeError = "error"
    +)
    +
    +const (
    +	// ExcessCapacityTerminationPolicyNoTermination is a ExcessCapacityTerminationPolicy enum value
    +	ExcessCapacityTerminationPolicyNoTermination = "noTermination"
    +
    +	// ExcessCapacityTerminationPolicyDefault is a ExcessCapacityTerminationPolicy enum value
    +	ExcessCapacityTerminationPolicyDefault = "default"
    +)
    +
    +const (
    +	// ExportEnvironmentCitrix is a ExportEnvironment enum value
    +	ExportEnvironmentCitrix = "citrix"
    +
    +	// ExportEnvironmentVmware is a ExportEnvironment enum value
    +	ExportEnvironmentVmware = "vmware"
    +
    +	// ExportEnvironmentMicrosoft is a ExportEnvironment enum value
    +	ExportEnvironmentMicrosoft = "microsoft"
    +)
    +
    +const (
    +	// ExportTaskStateActive is a ExportTaskState enum value
    +	ExportTaskStateActive = "active"
    +
    +	// ExportTaskStateCancelling is a ExportTaskState enum value
    +	ExportTaskStateCancelling = "cancelling"
    +
    +	// ExportTaskStateCancelled is a ExportTaskState enum value
    +	ExportTaskStateCancelled = "cancelled"
    +
    +	// ExportTaskStateCompleted is a ExportTaskState enum value
    +	ExportTaskStateCompleted = "completed"
    +)
    +
    +const (
    +	// FleetTypeRequest is a FleetType enum value
    +	FleetTypeRequest = "request"
    +
    +	// FleetTypeMaintain is a FleetType enum value
    +	FleetTypeMaintain = "maintain"
    +)
    +
    +const (
    +	// FlowLogsResourceTypeVpc is a FlowLogsResourceType enum value
    +	FlowLogsResourceTypeVpc = "VPC"
    +
    +	// FlowLogsResourceTypeSubnet is a FlowLogsResourceType enum value
    +	FlowLogsResourceTypeSubnet = "Subnet"
    +
    +	// FlowLogsResourceTypeNetworkInterface is a FlowLogsResourceType enum value
    +	FlowLogsResourceTypeNetworkInterface = "NetworkInterface"
    +)
    +
    +const (
    +	// GatewayTypeIpsec1 is a GatewayType enum value
    +	GatewayTypeIpsec1 = "ipsec.1"
    +)
    +
    +const (
    +	// HostTenancyDedicated is a HostTenancy enum value
    +	HostTenancyDedicated = "dedicated"
    +
    +	// HostTenancyHost is a HostTenancy enum value
    +	HostTenancyHost = "host"
    +)
    +
    +const (
    +	// HypervisorTypeOvm is a HypervisorType enum value
    +	HypervisorTypeOvm = "ovm"
    +
    +	// HypervisorTypeXen is a HypervisorType enum value
    +	HypervisorTypeXen = "xen"
    +)
    +
    +const (
    +	// ImageAttributeNameDescription is a ImageAttributeName enum value
    +	ImageAttributeNameDescription = "description"
    +
    +	// ImageAttributeNameKernel is a ImageAttributeName enum value
    +	ImageAttributeNameKernel = "kernel"
    +
    +	// ImageAttributeNameRamdisk is a ImageAttributeName enum value
    +	ImageAttributeNameRamdisk = "ramdisk"
    +
    +	// ImageAttributeNameLaunchPermission is a ImageAttributeName enum value
    +	ImageAttributeNameLaunchPermission = "launchPermission"
    +
    +	// ImageAttributeNameProductCodes is a ImageAttributeName enum value
    +	ImageAttributeNameProductCodes = "productCodes"
    +
    +	// ImageAttributeNameBlockDeviceMapping is a ImageAttributeName enum value
    +	ImageAttributeNameBlockDeviceMapping = "blockDeviceMapping"
    +
    +	// ImageAttributeNameSriovNetSupport is a ImageAttributeName enum value
    +	ImageAttributeNameSriovNetSupport = "sriovNetSupport"
    +)
    +
    +const (
    +	// ImageStatePending is a ImageState enum value
    +	ImageStatePending = "pending"
    +
    +	// ImageStateAvailable is a ImageState enum value
    +	ImageStateAvailable = "available"
    +
    +	// ImageStateInvalid is a ImageState enum value
    +	ImageStateInvalid = "invalid"
    +
    +	// ImageStateDeregistered is a ImageState enum value
    +	ImageStateDeregistered = "deregistered"
    +
    +	// ImageStateTransient is a ImageState enum value
    +	ImageStateTransient = "transient"
    +
    +	// ImageStateFailed is a ImageState enum value
    +	ImageStateFailed = "failed"
    +
    +	// ImageStateError is a ImageState enum value
    +	ImageStateError = "error"
    +)
    +
    +const (
    +	// ImageTypeValuesMachine is a ImageTypeValues enum value
    +	ImageTypeValuesMachine = "machine"
    +
    +	// ImageTypeValuesKernel is a ImageTypeValues enum value
    +	ImageTypeValuesKernel = "kernel"
    +
    +	// ImageTypeValuesRamdisk is a ImageTypeValues enum value
    +	ImageTypeValuesRamdisk = "ramdisk"
    +)
    +
    +const (
    +	// InstanceAttributeNameInstanceType is a InstanceAttributeName enum value
    +	InstanceAttributeNameInstanceType = "instanceType"
    +
    +	// InstanceAttributeNameKernel is a InstanceAttributeName enum value
    +	InstanceAttributeNameKernel = "kernel"
    +
    +	// InstanceAttributeNameRamdisk is a InstanceAttributeName enum value
    +	InstanceAttributeNameRamdisk = "ramdisk"
    +
    +	// InstanceAttributeNameUserData is a InstanceAttributeName enum value
    +	InstanceAttributeNameUserData = "userData"
    +
    +	// InstanceAttributeNameDisableApiTermination is a InstanceAttributeName enum value
    +	InstanceAttributeNameDisableApiTermination = "disableApiTermination"
    +
    +	// InstanceAttributeNameInstanceInitiatedShutdownBehavior is a InstanceAttributeName enum value
    +	InstanceAttributeNameInstanceInitiatedShutdownBehavior = "instanceInitiatedShutdownBehavior"
    +
    +	// InstanceAttributeNameRootDeviceName is a InstanceAttributeName enum value
    +	InstanceAttributeNameRootDeviceName = "rootDeviceName"
    +
    +	// InstanceAttributeNameBlockDeviceMapping is a InstanceAttributeName enum value
    +	InstanceAttributeNameBlockDeviceMapping = "blockDeviceMapping"
    +
    +	// InstanceAttributeNameProductCodes is a InstanceAttributeName enum value
    +	InstanceAttributeNameProductCodes = "productCodes"
    +
    +	// InstanceAttributeNameSourceDestCheck is a InstanceAttributeName enum value
    +	InstanceAttributeNameSourceDestCheck = "sourceDestCheck"
    +
    +	// InstanceAttributeNameGroupSet is a InstanceAttributeName enum value
    +	InstanceAttributeNameGroupSet = "groupSet"
    +
    +	// InstanceAttributeNameEbsOptimized is a InstanceAttributeName enum value
    +	InstanceAttributeNameEbsOptimized = "ebsOptimized"
    +
    +	// InstanceAttributeNameSriovNetSupport is a InstanceAttributeName enum value
    +	InstanceAttributeNameSriovNetSupport = "sriovNetSupport"
    +
    +	// InstanceAttributeNameEnaSupport is a InstanceAttributeName enum value
    +	InstanceAttributeNameEnaSupport = "enaSupport"
    +)
    +
    +const (
    +	// InstanceLifecycleTypeSpot is a InstanceLifecycleType enum value
    +	InstanceLifecycleTypeSpot = "spot"
    +
    +	// InstanceLifecycleTypeScheduled is a InstanceLifecycleType enum value
    +	InstanceLifecycleTypeScheduled = "scheduled"
    +)
    +
    +const (
    +	// InstanceStateNamePending is a InstanceStateName enum value
    +	InstanceStateNamePending = "pending"
    +
    +	// InstanceStateNameRunning is a InstanceStateName enum value
    +	InstanceStateNameRunning = "running"
    +
    +	// InstanceStateNameShuttingDown is a InstanceStateName enum value
    +	InstanceStateNameShuttingDown = "shutting-down"
    +
    +	// InstanceStateNameTerminated is a InstanceStateName enum value
    +	InstanceStateNameTerminated = "terminated"
    +
    +	// InstanceStateNameStopping is a InstanceStateName enum value
    +	InstanceStateNameStopping = "stopping"
    +
    +	// InstanceStateNameStopped is a InstanceStateName enum value
    +	InstanceStateNameStopped = "stopped"
    +)
    +
    +const (
    +	// InstanceTypeT1Micro is a InstanceType enum value
    +	InstanceTypeT1Micro = "t1.micro"
    +
    +	// InstanceTypeT2Nano is a InstanceType enum value
    +	InstanceTypeT2Nano = "t2.nano"
    +
    +	// InstanceTypeT2Micro is a InstanceType enum value
    +	InstanceTypeT2Micro = "t2.micro"
    +
    +	// InstanceTypeT2Small is a InstanceType enum value
    +	InstanceTypeT2Small = "t2.small"
    +
    +	// InstanceTypeT2Medium is a InstanceType enum value
    +	InstanceTypeT2Medium = "t2.medium"
    +
    +	// InstanceTypeT2Large is a InstanceType enum value
    +	InstanceTypeT2Large = "t2.large"
    +
    +	// InstanceTypeM1Small is a InstanceType enum value
    +	InstanceTypeM1Small = "m1.small"
    +
    +	// InstanceTypeM1Medium is a InstanceType enum value
    +	InstanceTypeM1Medium = "m1.medium"
    +
    +	// InstanceTypeM1Large is a InstanceType enum value
    +	InstanceTypeM1Large = "m1.large"
    +
    +	// InstanceTypeM1Xlarge is a InstanceType enum value
    +	InstanceTypeM1Xlarge = "m1.xlarge"
    +
    +	// InstanceTypeM3Medium is a InstanceType enum value
    +	InstanceTypeM3Medium = "m3.medium"
    +
    +	// InstanceTypeM3Large is a InstanceType enum value
    +	InstanceTypeM3Large = "m3.large"
    +
    +	// InstanceTypeM3Xlarge is a InstanceType enum value
    +	InstanceTypeM3Xlarge = "m3.xlarge"
    +
    +	// InstanceTypeM32xlarge is a InstanceType enum value
    +	InstanceTypeM32xlarge = "m3.2xlarge"
    +
    +	// InstanceTypeM4Large is a InstanceType enum value
    +	InstanceTypeM4Large = "m4.large"
    +
    +	// InstanceTypeM4Xlarge is a InstanceType enum value
    +	InstanceTypeM4Xlarge = "m4.xlarge"
    +
    +	// InstanceTypeM42xlarge is a InstanceType enum value
    +	InstanceTypeM42xlarge = "m4.2xlarge"
    +
    +	// InstanceTypeM44xlarge is a InstanceType enum value
    +	InstanceTypeM44xlarge = "m4.4xlarge"
    +
    +	// InstanceTypeM410xlarge is a InstanceType enum value
    +	InstanceTypeM410xlarge = "m4.10xlarge"
    +
    +	// InstanceTypeM416xlarge is a InstanceType enum value
    +	InstanceTypeM416xlarge = "m4.16xlarge"
    +
    +	// InstanceTypeM2Xlarge is a InstanceType enum value
    +	InstanceTypeM2Xlarge = "m2.xlarge"
    +
    +	// InstanceTypeM22xlarge is a InstanceType enum value
    +	InstanceTypeM22xlarge = "m2.2xlarge"
    +
    +	// InstanceTypeM24xlarge is a InstanceType enum value
    +	InstanceTypeM24xlarge = "m2.4xlarge"
    +
    +	// InstanceTypeCr18xlarge is a InstanceType enum value
    +	InstanceTypeCr18xlarge = "cr1.8xlarge"
    +
    +	// InstanceTypeR3Large is a InstanceType enum value
    +	InstanceTypeR3Large = "r3.large"
    +
    +	// InstanceTypeR3Xlarge is a InstanceType enum value
    +	InstanceTypeR3Xlarge = "r3.xlarge"
    +
    +	// InstanceTypeR32xlarge is a InstanceType enum value
    +	InstanceTypeR32xlarge = "r3.2xlarge"
    +
    +	// InstanceTypeR34xlarge is a InstanceType enum value
    +	InstanceTypeR34xlarge = "r3.4xlarge"
    +
    +	// InstanceTypeR38xlarge is a InstanceType enum value
    +	InstanceTypeR38xlarge = "r3.8xlarge"
    +
    +	// InstanceTypeX116xlarge is a InstanceType enum value
    +	InstanceTypeX116xlarge = "x1.16xlarge"
    +
    +	// InstanceTypeX132xlarge is a InstanceType enum value
    +	InstanceTypeX132xlarge = "x1.32xlarge"
    +
    +	// InstanceTypeI2Xlarge is a InstanceType enum value
    +	InstanceTypeI2Xlarge = "i2.xlarge"
    +
    +	// InstanceTypeI22xlarge is a InstanceType enum value
    +	InstanceTypeI22xlarge = "i2.2xlarge"
    +
    +	// InstanceTypeI24xlarge is a InstanceType enum value
    +	InstanceTypeI24xlarge = "i2.4xlarge"
    +
    +	// InstanceTypeI28xlarge is a InstanceType enum value
    +	InstanceTypeI28xlarge = "i2.8xlarge"
    +
    +	// InstanceTypeHi14xlarge is a InstanceType enum value
    +	InstanceTypeHi14xlarge = "hi1.4xlarge"
    +
    +	// InstanceTypeHs18xlarge is a InstanceType enum value
    +	InstanceTypeHs18xlarge = "hs1.8xlarge"
    +
    +	// InstanceTypeC1Medium is a InstanceType enum value
    +	InstanceTypeC1Medium = "c1.medium"
    +
    +	// InstanceTypeC1Xlarge is a InstanceType enum value
    +	InstanceTypeC1Xlarge = "c1.xlarge"
    +
    +	// InstanceTypeC3Large is a InstanceType enum value
    +	InstanceTypeC3Large = "c3.large"
    +
    +	// InstanceTypeC3Xlarge is a InstanceType enum value
    +	InstanceTypeC3Xlarge = "c3.xlarge"
    +
    +	// InstanceTypeC32xlarge is a InstanceType enum value
    +	InstanceTypeC32xlarge = "c3.2xlarge"
    +
    +	// InstanceTypeC34xlarge is a InstanceType enum value
    +	InstanceTypeC34xlarge = "c3.4xlarge"
    +
    +	// InstanceTypeC38xlarge is a InstanceType enum value
    +	InstanceTypeC38xlarge = "c3.8xlarge"
    +
    +	// InstanceTypeC4Large is a InstanceType enum value
    +	InstanceTypeC4Large = "c4.large"
    +
    +	// InstanceTypeC4Xlarge is a InstanceType enum value
    +	InstanceTypeC4Xlarge = "c4.xlarge"
    +
    +	// InstanceTypeC42xlarge is a InstanceType enum value
    +	InstanceTypeC42xlarge = "c4.2xlarge"
    +
    +	// InstanceTypeC44xlarge is a InstanceType enum value
    +	InstanceTypeC44xlarge = "c4.4xlarge"
    +
    +	// InstanceTypeC48xlarge is a InstanceType enum value
    +	InstanceTypeC48xlarge = "c4.8xlarge"
    +
    +	// InstanceTypeCc14xlarge is a InstanceType enum value
    +	InstanceTypeCc14xlarge = "cc1.4xlarge"
    +
    +	// InstanceTypeCc28xlarge is a InstanceType enum value
    +	InstanceTypeCc28xlarge = "cc2.8xlarge"
    +
    +	// InstanceTypeG22xlarge is a InstanceType enum value
    +	InstanceTypeG22xlarge = "g2.2xlarge"
    +
    +	// InstanceTypeG28xlarge is a InstanceType enum value
    +	InstanceTypeG28xlarge = "g2.8xlarge"
    +
    +	// InstanceTypeCg14xlarge is a InstanceType enum value
    +	InstanceTypeCg14xlarge = "cg1.4xlarge"
    +
    +	// InstanceTypeP2Xlarge is a InstanceType enum value
    +	InstanceTypeP2Xlarge = "p2.xlarge"
    +
    +	// InstanceTypeP28xlarge is a InstanceType enum value
    +	InstanceTypeP28xlarge = "p2.8xlarge"
    +
    +	// InstanceTypeP216xlarge is a InstanceType enum value
    +	InstanceTypeP216xlarge = "p2.16xlarge"
    +
    +	// InstanceTypeD2Xlarge is a InstanceType enum value
    +	InstanceTypeD2Xlarge = "d2.xlarge"
    +
    +	// InstanceTypeD22xlarge is a InstanceType enum value
    +	InstanceTypeD22xlarge = "d2.2xlarge"
    +
    +	// InstanceTypeD24xlarge is a InstanceType enum value
    +	InstanceTypeD24xlarge = "d2.4xlarge"
    +
    +	// InstanceTypeD28xlarge is a InstanceType enum value
    +	InstanceTypeD28xlarge = "d2.8xlarge"
    +)
    +
    +const (
    +	// ListingStateAvailable is a ListingState enum value
    +	ListingStateAvailable = "available"
    +
    +	// ListingStateSold is a ListingState enum value
    +	ListingStateSold = "sold"
    +
    +	// ListingStateCancelled is a ListingState enum value
    +	ListingStateCancelled = "cancelled"
    +
    +	// ListingStatePending is a ListingState enum value
    +	ListingStatePending = "pending"
    +)
    +
    +const (
    +	// ListingStatusActive is a ListingStatus enum value
    +	ListingStatusActive = "active"
    +
    +	// ListingStatusPending is a ListingStatus enum value
    +	ListingStatusPending = "pending"
    +
    +	// ListingStatusCancelled is a ListingStatus enum value
    +	ListingStatusCancelled = "cancelled"
    +
    +	// ListingStatusClosed is a ListingStatus enum value
    +	ListingStatusClosed = "closed"
    +)
    +
    +const (
    +	// MonitoringStateDisabled is a MonitoringState enum value
    +	MonitoringStateDisabled = "disabled"
    +
    +	// MonitoringStateDisabling is a MonitoringState enum value
    +	MonitoringStateDisabling = "disabling"
    +
    +	// MonitoringStateEnabled is a MonitoringState enum value
    +	MonitoringStateEnabled = "enabled"
    +
    +	// MonitoringStatePending is a MonitoringState enum value
    +	MonitoringStatePending = "pending"
    +)
    +
    +const (
    +	// MoveStatusMovingToVpc is a MoveStatus enum value
    +	MoveStatusMovingToVpc = "movingToVpc"
    +
    +	// MoveStatusRestoringToClassic is a MoveStatus enum value
    +	MoveStatusRestoringToClassic = "restoringToClassic"
    +)
    +
    +const (
    +	// NatGatewayStatePending is a NatGatewayState enum value
    +	NatGatewayStatePending = "pending"
    +
    +	// NatGatewayStateFailed is a NatGatewayState enum value
    +	NatGatewayStateFailed = "failed"
    +
    +	// NatGatewayStateAvailable is a NatGatewayState enum value
    +	NatGatewayStateAvailable = "available"
    +
    +	// NatGatewayStateDeleting is a NatGatewayState enum value
    +	NatGatewayStateDeleting = "deleting"
    +
    +	// NatGatewayStateDeleted is a NatGatewayState enum value
    +	NatGatewayStateDeleted = "deleted"
    +)
    +
    +const (
    +	// NetworkInterfaceAttributeDescription is a NetworkInterfaceAttribute enum value
    +	NetworkInterfaceAttributeDescription = "description"
    +
    +	// NetworkInterfaceAttributeGroupSet is a NetworkInterfaceAttribute enum value
    +	NetworkInterfaceAttributeGroupSet = "groupSet"
    +
    +	// NetworkInterfaceAttributeSourceDestCheck is a NetworkInterfaceAttribute enum value
    +	NetworkInterfaceAttributeSourceDestCheck = "sourceDestCheck"
    +
    +	// NetworkInterfaceAttributeAttachment is a NetworkInterfaceAttribute enum value
    +	NetworkInterfaceAttributeAttachment = "attachment"
    +)
    +
    +const (
    +	// NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value
    +	NetworkInterfaceStatusAvailable = "available"
    +
    +	// NetworkInterfaceStatusAttaching is a NetworkInterfaceStatus enum value
    +	NetworkInterfaceStatusAttaching = "attaching"
    +
    +	// NetworkInterfaceStatusInUse is a NetworkInterfaceStatus enum value
    +	NetworkInterfaceStatusInUse = "in-use"
    +
    +	// NetworkInterfaceStatusDetaching is a NetworkInterfaceStatus enum value
    +	NetworkInterfaceStatusDetaching = "detaching"
    +)
    +
    +const (
    +	// NetworkInterfaceTypeInterface is a NetworkInterfaceType enum value
    +	NetworkInterfaceTypeInterface = "interface"
    +
    +	// NetworkInterfaceTypeNatGateway is a NetworkInterfaceType enum value
    +	NetworkInterfaceTypeNatGateway = "natGateway"
    +)
    +
    +const (
    +	// OfferingClassTypeStandard is a OfferingClassType enum value
    +	OfferingClassTypeStandard = "standard"
    +
    +	// OfferingClassTypeConvertible is a OfferingClassType enum value
    +	OfferingClassTypeConvertible = "convertible"
    +)
    +
    +const (
    +	// OfferingTypeValuesHeavyUtilization is a OfferingTypeValues enum value
    +	OfferingTypeValuesHeavyUtilization = "Heavy Utilization"
    +
    +	// OfferingTypeValuesMediumUtilization is a OfferingTypeValues enum value
    +	OfferingTypeValuesMediumUtilization = "Medium Utilization"
    +
    +	// OfferingTypeValuesLightUtilization is a OfferingTypeValues enum value
    +	OfferingTypeValuesLightUtilization = "Light Utilization"
    +
    +	// OfferingTypeValuesNoUpfront is a OfferingTypeValues enum value
    +	OfferingTypeValuesNoUpfront = "No Upfront"
    +
    +	// OfferingTypeValuesPartialUpfront is a OfferingTypeValues enum value
    +	OfferingTypeValuesPartialUpfront = "Partial Upfront"
    +
    +	// OfferingTypeValuesAllUpfront is a OfferingTypeValues enum value
    +	OfferingTypeValuesAllUpfront = "All Upfront"
    +)
    +
    +const (
    +	// OperationTypeAdd is a OperationType enum value
    +	OperationTypeAdd = "add"
    +
    +	// OperationTypeRemove is a OperationType enum value
    +	OperationTypeRemove = "remove"
    +)
    +
    +const (
    +	// PaymentOptionAllUpfront is a PaymentOption enum value
    +	PaymentOptionAllUpfront = "AllUpfront"
    +
    +	// PaymentOptionPartialUpfront is a PaymentOption enum value
    +	PaymentOptionPartialUpfront = "PartialUpfront"
    +
    +	// PaymentOptionNoUpfront is a PaymentOption enum value
    +	PaymentOptionNoUpfront = "NoUpfront"
    +)
    +
    +const (
    +	// PermissionGroupAll is a PermissionGroup enum value
    +	PermissionGroupAll = "all"
    +)
    +
    +const (
    +	// PlacementGroupStatePending is a PlacementGroupState enum value
    +	PlacementGroupStatePending = "pending"
    +
    +	// PlacementGroupStateAvailable is a PlacementGroupState enum value
    +	PlacementGroupStateAvailable = "available"
    +
    +	// PlacementGroupStateDeleting is a PlacementGroupState enum value
    +	PlacementGroupStateDeleting = "deleting"
    +
    +	// PlacementGroupStateDeleted is a PlacementGroupState enum value
    +	PlacementGroupStateDeleted = "deleted"
    +)
    +
    +const (
    +	// PlacementStrategyCluster is a PlacementStrategy enum value
    +	PlacementStrategyCluster = "cluster"
    +)
    +
    +const (
    +	// PlatformValuesWindows is a PlatformValues enum value
    +	PlatformValuesWindows = "Windows"
    +)
    +
    +const (
    +	// ProductCodeValuesDevpay is a ProductCodeValues enum value
    +	ProductCodeValuesDevpay = "devpay"
    +
    +	// ProductCodeValuesMarketplace is a ProductCodeValues enum value
    +	ProductCodeValuesMarketplace = "marketplace"
    +)
    +
    +const (
    +	// RIProductDescriptionLinuxUnix is a RIProductDescription enum value
    +	RIProductDescriptionLinuxUnix = "Linux/UNIX"
    +
    +	// RIProductDescriptionLinuxUnixamazonVpc is a RIProductDescription enum value
    +	RIProductDescriptionLinuxUnixamazonVpc = "Linux/UNIX (Amazon VPC)"
    +
    +	// RIProductDescriptionWindows is a RIProductDescription enum value
    +	RIProductDescriptionWindows = "Windows"
    +
    +	// RIProductDescriptionWindowsAmazonVpc is a RIProductDescription enum value
    +	RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)"
    +)
    +
    +const (
    +	// RecurringChargeFrequencyHourly is a RecurringChargeFrequency enum value
    +	RecurringChargeFrequencyHourly = "Hourly"
    +)
    +
    +const (
    +	// ReportInstanceReasonCodesInstanceStuckInState is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state"
    +
    +	// ReportInstanceReasonCodesUnresponsive is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesUnresponsive = "unresponsive"
    +
    +	// ReportInstanceReasonCodesNotAcceptingCredentials is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesNotAcceptingCredentials = "not-accepting-credentials"
    +
    +	// ReportInstanceReasonCodesPasswordNotAvailable is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesPasswordNotAvailable = "password-not-available"
    +
    +	// ReportInstanceReasonCodesPerformanceNetwork is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesPerformanceNetwork = "performance-network"
    +
    +	// ReportInstanceReasonCodesPerformanceInstanceStore is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesPerformanceInstanceStore = "performance-instance-store"
    +
    +	// ReportInstanceReasonCodesPerformanceEbsVolume is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesPerformanceEbsVolume = "performance-ebs-volume"
    +
    +	// ReportInstanceReasonCodesPerformanceOther is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesPerformanceOther = "performance-other"
    +
    +	// ReportInstanceReasonCodesOther is a ReportInstanceReasonCodes enum value
    +	ReportInstanceReasonCodesOther = "other"
    +)
    +
    +const (
    +	// ReportStatusTypeOk is a ReportStatusType enum value
    +	ReportStatusTypeOk = "ok"
    +
    +	// ReportStatusTypeImpaired is a ReportStatusType enum value
    +	ReportStatusTypeImpaired = "impaired"
    +)
    +
    +const (
    +	// ReservationStatePaymentPending is a ReservationState enum value
    +	ReservationStatePaymentPending = "payment-pending"
    +
    +	// ReservationStatePaymentFailed is a ReservationState enum value
    +	ReservationStatePaymentFailed = "payment-failed"
    +
    +	// ReservationStateActive is a ReservationState enum value
    +	ReservationStateActive = "active"
    +
    +	// ReservationStateRetired is a ReservationState enum value
    +	ReservationStateRetired = "retired"
    +)
    +
    +const (
    +	// ReservedInstanceStatePaymentPending is a ReservedInstanceState enum value
    +	ReservedInstanceStatePaymentPending = "payment-pending"
    +
    +	// ReservedInstanceStateActive is a ReservedInstanceState enum value
    +	ReservedInstanceStateActive = "active"
    +
    +	// ReservedInstanceStatePaymentFailed is a ReservedInstanceState enum value
    +	ReservedInstanceStatePaymentFailed = "payment-failed"
    +
    +	// ReservedInstanceStateRetired is a ReservedInstanceState enum value
    +	ReservedInstanceStateRetired = "retired"
    +)
    +
    +const (
    +	// ResetImageAttributeNameLaunchPermission is a ResetImageAttributeName enum value
    +	ResetImageAttributeNameLaunchPermission = "launchPermission"
    +)
    +
    +const (
    +	// ResourceTypeCustomerGateway is a ResourceType enum value
    +	ResourceTypeCustomerGateway = "customer-gateway"
    +
    +	// ResourceTypeDhcpOptions is a ResourceType enum value
    +	ResourceTypeDhcpOptions = "dhcp-options"
    +
    +	// ResourceTypeImage is a ResourceType enum value
    +	ResourceTypeImage = "image"
    +
    +	// ResourceTypeInstance is a ResourceType enum value
    +	ResourceTypeInstance = "instance"
    +
    +	// ResourceTypeInternetGateway is a ResourceType enum value
    +	ResourceTypeInternetGateway = "internet-gateway"
    +
    +	// ResourceTypeNetworkAcl is a ResourceType enum value
    +	ResourceTypeNetworkAcl = "network-acl"
    +
    +	// ResourceTypeNetworkInterface is a ResourceType enum value
    +	ResourceTypeNetworkInterface = "network-interface"
    +
    +	// ResourceTypeReservedInstances is a ResourceType enum value
    +	ResourceTypeReservedInstances = "reserved-instances"
    +
    +	// ResourceTypeRouteTable is a ResourceType enum value
    +	ResourceTypeRouteTable = "route-table"
    +
    +	// ResourceTypeSnapshot is a ResourceType enum value
    +	ResourceTypeSnapshot = "snapshot"
    +
    +	// ResourceTypeSpotInstancesRequest is a ResourceType enum value
    +	ResourceTypeSpotInstancesRequest = "spot-instances-request"
    +
    +	// ResourceTypeSubnet is a ResourceType enum value
    +	ResourceTypeSubnet = "subnet"
    +
    +	// ResourceTypeSecurityGroup is a ResourceType enum value
    +	ResourceTypeSecurityGroup = "security-group"
    +
    +	// ResourceTypeVolume is a ResourceType enum value
    +	ResourceTypeVolume = "volume"
    +
    +	// ResourceTypeVpc is a ResourceType enum value
    +	ResourceTypeVpc = "vpc"
    +
    +	// ResourceTypeVpnConnection is a ResourceType enum value
    +	ResourceTypeVpnConnection = "vpn-connection"
    +
    +	// ResourceTypeVpnGateway is a ResourceType enum value
    +	ResourceTypeVpnGateway = "vpn-gateway"
    +)
    +
    +const (
    +	// RouteOriginCreateRouteTable is a RouteOrigin enum value
    +	RouteOriginCreateRouteTable = "CreateRouteTable"
    +
    +	// RouteOriginCreateRoute is a RouteOrigin enum value
    +	RouteOriginCreateRoute = "CreateRoute"
    +
    +	// RouteOriginEnableVgwRoutePropagation is a RouteOrigin enum value
    +	RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation"
    +)
    +
    +const (
    +	// RouteStateActive is a RouteState enum value
    +	RouteStateActive = "active"
    +
    +	// RouteStateBlackhole is a RouteState enum value
    +	RouteStateBlackhole = "blackhole"
    +)
    +
    +const (
    +	// RuleActionAllow is a RuleAction enum value
    +	RuleActionAllow = "allow"
    +
    +	// RuleActionDeny is a RuleAction enum value
    +	RuleActionDeny = "deny"
    +)
    +
    +const (
    +	// ShutdownBehaviorStop is a ShutdownBehavior enum value
    +	ShutdownBehaviorStop = "stop"
    +
    +	// ShutdownBehaviorTerminate is a ShutdownBehavior enum value
    +	ShutdownBehaviorTerminate = "terminate"
    +)
    +
    +const (
    +	// SnapshotAttributeNameProductCodes is a SnapshotAttributeName enum value
    +	SnapshotAttributeNameProductCodes = "productCodes"
    +
    +	// SnapshotAttributeNameCreateVolumePermission is a SnapshotAttributeName enum value
    +	SnapshotAttributeNameCreateVolumePermission = "createVolumePermission"
    +)
    +
    +const (
    +	// SnapshotStatePending is a SnapshotState enum value
    +	SnapshotStatePending = "pending"
    +
    +	// SnapshotStateCompleted is a SnapshotState enum value
    +	SnapshotStateCompleted = "completed"
    +
    +	// SnapshotStateError is a SnapshotState enum value
    +	SnapshotStateError = "error"
    +)
    +
    +const (
    +	// SpotInstanceStateOpen is a SpotInstanceState enum value
    +	SpotInstanceStateOpen = "open"
    +
    +	// SpotInstanceStateActive is a SpotInstanceState enum value
    +	SpotInstanceStateActive = "active"
    +
    +	// SpotInstanceStateClosed is a SpotInstanceState enum value
    +	SpotInstanceStateClosed = "closed"
    +
    +	// SpotInstanceStateCancelled is a SpotInstanceState enum value
    +	SpotInstanceStateCancelled = "cancelled"
    +
    +	// SpotInstanceStateFailed is a SpotInstanceState enum value
    +	SpotInstanceStateFailed = "failed"
    +)
    +
    +const (
    +	// SpotInstanceTypeOneTime is a SpotInstanceType enum value
    +	SpotInstanceTypeOneTime = "one-time"
    +
    +	// SpotInstanceTypePersistent is a SpotInstanceType enum value
    +	SpotInstanceTypePersistent = "persistent"
    +)
    +
    +const (
    +	// StatePending is a State enum value
    +	StatePending = "Pending"
    +
    +	// StateAvailable is a State enum value
    +	StateAvailable = "Available"
    +
    +	// StateDeleting is a State enum value
    +	StateDeleting = "Deleting"
    +
    +	// StateDeleted is a State enum value
    +	StateDeleted = "Deleted"
    +)
    +
    +const (
    +	// StatusMoveInProgress is a Status enum value
    +	StatusMoveInProgress = "MoveInProgress"
    +
    +	// StatusInVpc is a Status enum value
    +	StatusInVpc = "InVpc"
    +
    +	// StatusInClassic is a Status enum value
    +	StatusInClassic = "InClassic"
    +)
    +
    +const (
    +	// StatusNameReachability is a StatusName enum value
    +	StatusNameReachability = "reachability"
    +)
    +
    +const (
    +	// StatusTypePassed is a StatusType enum value
    +	StatusTypePassed = "passed"
    +
    +	// StatusTypeFailed is a StatusType enum value
    +	StatusTypeFailed = "failed"
    +
    +	// StatusTypeInsufficientData is a StatusType enum value
    +	StatusTypeInsufficientData = "insufficient-data"
    +
    +	// StatusTypeInitializing is a StatusType enum value
    +	StatusTypeInitializing = "initializing"
    +)
    +
    +const (
    +	// SubnetStatePending is a SubnetState enum value
    +	SubnetStatePending = "pending"
    +
    +	// SubnetStateAvailable is a SubnetState enum value
    +	SubnetStateAvailable = "available"
    +)
    +
    +const (
    +	// SummaryStatusOk is a SummaryStatus enum value
    +	SummaryStatusOk = "ok"
    +
    +	// SummaryStatusImpaired is a SummaryStatus enum value
    +	SummaryStatusImpaired = "impaired"
    +
    +	// SummaryStatusInsufficientData is a SummaryStatus enum value
    +	SummaryStatusInsufficientData = "insufficient-data"
    +
    +	// SummaryStatusNotApplicable is a SummaryStatus enum value
    +	SummaryStatusNotApplicable = "not-applicable"
    +
    +	// SummaryStatusInitializing is a SummaryStatus enum value
    +	SummaryStatusInitializing = "initializing"
    +)
    +
    +const (
    +	// TelemetryStatusUp is a TelemetryStatus enum value
    +	TelemetryStatusUp = "UP"
    +
    +	// TelemetryStatusDown is a TelemetryStatus enum value
    +	TelemetryStatusDown = "DOWN"
    +)
    +
    +const (
    +	// TenancyDefault is a Tenancy enum value
    +	TenancyDefault = "default"
    +
    +	// TenancyDedicated is a Tenancy enum value
    +	TenancyDedicated = "dedicated"
    +
    +	// TenancyHost is a Tenancy enum value
    +	TenancyHost = "host"
    +)
    +
    +const (
    +	// TrafficTypeAccept is a TrafficType enum value
    +	TrafficTypeAccept = "ACCEPT"
    +
    +	// TrafficTypeReject is a TrafficType enum value
    +	TrafficTypeReject = "REJECT"
    +
    +	// TrafficTypeAll is a TrafficType enum value
    +	TrafficTypeAll = "ALL"
    +)
    +
    +const (
    +	// VirtualizationTypeHvm is a VirtualizationType enum value
    +	VirtualizationTypeHvm = "hvm"
    +
    +	// VirtualizationTypeParavirtual is a VirtualizationType enum value
    +	VirtualizationTypeParavirtual = "paravirtual"
    +)
    +
    +const (
    +	// VolumeAttachmentStateAttaching is a VolumeAttachmentState enum value
    +	VolumeAttachmentStateAttaching = "attaching"
    +
    +	// VolumeAttachmentStateAttached is a VolumeAttachmentState enum value
    +	VolumeAttachmentStateAttached = "attached"
    +
    +	// VolumeAttachmentStateDetaching is a VolumeAttachmentState enum value
    +	VolumeAttachmentStateDetaching = "detaching"
    +
    +	// VolumeAttachmentStateDetached is a VolumeAttachmentState enum value
    +	VolumeAttachmentStateDetached = "detached"
    +)
    +
    +const (
    +	// VolumeAttributeNameAutoEnableIo is a VolumeAttributeName enum value
    +	VolumeAttributeNameAutoEnableIo = "autoEnableIO"
    +
    +	// VolumeAttributeNameProductCodes is a VolumeAttributeName enum value
    +	VolumeAttributeNameProductCodes = "productCodes"
    +)
    +
    +const (
    +	// VolumeStateCreating is a VolumeState enum value
    +	VolumeStateCreating = "creating"
    +
    +	// VolumeStateAvailable is a VolumeState enum value
    +	VolumeStateAvailable = "available"
    +
    +	// VolumeStateInUse is a VolumeState enum value
    +	VolumeStateInUse = "in-use"
    +
    +	// VolumeStateDeleting is a VolumeState enum value
    +	VolumeStateDeleting = "deleting"
    +
    +	// VolumeStateDeleted is a VolumeState enum value
    +	VolumeStateDeleted = "deleted"
    +
    +	// VolumeStateError is a VolumeState enum value
    +	VolumeStateError = "error"
    +)
    +
    +const (
    +	// VolumeStatusInfoStatusOk is a VolumeStatusInfoStatus enum value
    +	VolumeStatusInfoStatusOk = "ok"
    +
    +	// VolumeStatusInfoStatusImpaired is a VolumeStatusInfoStatus enum value
    +	VolumeStatusInfoStatusImpaired = "impaired"
    +
    +	// VolumeStatusInfoStatusInsufficientData is a VolumeStatusInfoStatus enum value
    +	VolumeStatusInfoStatusInsufficientData = "insufficient-data"
    +)
    +
    +const (
    +	// VolumeStatusNameIoEnabled is a VolumeStatusName enum value
    +	VolumeStatusNameIoEnabled = "io-enabled"
    +
    +	// VolumeStatusNameIoPerformance is a VolumeStatusName enum value
    +	VolumeStatusNameIoPerformance = "io-performance"
    +)
    +
    +const (
    +	// VolumeTypeStandard is a VolumeType enum value
    +	VolumeTypeStandard = "standard"
    +
    +	// VolumeTypeIo1 is a VolumeType enum value
    +	VolumeTypeIo1 = "io1"
    +
    +	// VolumeTypeGp2 is a VolumeType enum value
    +	VolumeTypeGp2 = "gp2"
    +
    +	// VolumeTypeSc1 is a VolumeType enum value
    +	VolumeTypeSc1 = "sc1"
    +
    +	// VolumeTypeSt1 is a VolumeType enum value
    +	VolumeTypeSt1 = "st1"
    +)
    +
    +const (
    +	// VpcAttributeNameEnableDnsSupport is a VpcAttributeName enum value
    +	VpcAttributeNameEnableDnsSupport = "enableDnsSupport"
    +
    +	// VpcAttributeNameEnableDnsHostnames is a VpcAttributeName enum value
    +	VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames"
    +)
    +
    +const (
    +	// VpcPeeringConnectionStateReasonCodeInitiatingRequest is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request"
    +
    +	// VpcPeeringConnectionStateReasonCodePendingAcceptance is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodePendingAcceptance = "pending-acceptance"
    +
    +	// VpcPeeringConnectionStateReasonCodeActive is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeActive = "active"
    +
    +	// VpcPeeringConnectionStateReasonCodeDeleted is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeDeleted = "deleted"
    +
    +	// VpcPeeringConnectionStateReasonCodeRejected is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeRejected = "rejected"
    +
    +	// VpcPeeringConnectionStateReasonCodeFailed is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeFailed = "failed"
    +
    +	// VpcPeeringConnectionStateReasonCodeExpired is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeExpired = "expired"
    +
    +	// VpcPeeringConnectionStateReasonCodeProvisioning is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeProvisioning = "provisioning"
    +
    +	// VpcPeeringConnectionStateReasonCodeDeleting is a VpcPeeringConnectionStateReasonCode enum value
    +	VpcPeeringConnectionStateReasonCodeDeleting = "deleting"
    +)
    +
    +const (
    +	// VpcStatePending is a VpcState enum value
    +	VpcStatePending = "pending"
    +
    +	// VpcStateAvailable is a VpcState enum value
    +	VpcStateAvailable = "available"
    +)
    +
    +const (
    +	// VpnStatePending is a VpnState enum value
    +	VpnStatePending = "pending"
    +
    +	// VpnStateAvailable is a VpnState enum value
    +	VpnStateAvailable = "available"
    +
    +	// VpnStateDeleting is a VpnState enum value
    +	VpnStateDeleting = "deleting"
    +
    +	// VpnStateDeleted is a VpnState enum value
    +	VpnStateDeleted = "deleted"
    +)
    +
    +const (
    +	// VpnStaticRouteSourceStatic is a VpnStaticRouteSource enum value
    +	VpnStaticRouteSourceStatic = "Static"
    +)
    +
    +const (
    +	// ScopeAvailabilityZone is a scope enum value
    +	ScopeAvailabilityZone = "Availability Zone"
    +
    +	// ScopeRegion is a scope enum value
    +	ScopeRegion = "Region"
    +)
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go
    new file mode 100644
    index 0000000..36181d9
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go
    @@ -0,0 +1,59 @@
    +package ec2
    +
    +import (
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/awsutil"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/private/endpoints"
    +)
    +
    +func init() {
    +	initRequest = func(r *request.Request) {
    +		if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter
    +			r.Handlers.Build.PushFront(fillPresignedURL)
    +		}
    +	}
    +}
    +
    +func fillPresignedURL(r *request.Request) {
    +	if !r.ParamsFilled() {
    +		return
    +	}
    +
    +	origParams := r.Params.(*CopySnapshotInput)
    +
    +	// Stop if PresignedURL/DestinationRegion is set
    +	if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil {
    +		return
    +	}
    +
    +	origParams.DestinationRegion = r.Config.Region
    +	newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput)
    +
    +	// Create a new request based on the existing request. We will use this to
    +	// presign the CopySnapshot request against the source region.
    +	cfg := r.Config.Copy(aws.NewConfig().
    +		WithEndpoint("").
    +		WithRegion(aws.StringValue(origParams.SourceRegion)))
    +
    +	clientInfo := r.ClientInfo
    +	clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion(
    +		clientInfo.ServiceName,
    +		aws.StringValue(cfg.Region),
    +		aws.BoolValue(cfg.DisableSSL),
    +		aws.BoolValue(cfg.UseDualStack),
    +	)
    +
    +	// Presign a CopySnapshot request with modified params
    +	req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data)
    +	url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough.
    +	if err != nil {                          // bubble error back up to original request
    +		r.Error = err
    +		return
    +	}
    +
    +	// We have our URL, set it on params
    +	origParams.PresignedUrl = &url
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go
    new file mode 100644
    index 0000000..b30c5e0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go
    @@ -0,0 +1,89 @@
    +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
    +
    +package ec2
    +
    +import (
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/client"
    +	"github.com/aws/aws-sdk-go/aws/client/metadata"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/aws/signer/v4"
    +	"github.com/aws/aws-sdk-go/private/protocol/ec2query"
    +)
    +
    +// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
    +// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your
    +// need to invest in hardware up front, so you can develop and deploy applications
    +// faster.
    +// The service client's operations are safe to be used concurrently.
    +// It is not safe to mutate any of the client's properties though.
    +type EC2 struct {
    +	*client.Client
    +}
    +
    +// Used for custom client initialization logic
    +var initClient func(*client.Client)
    +
    +// Used for custom request initialization logic
    +var initRequest func(*request.Request)
    +
    +// A ServiceName is the name of the service the client will make API calls to.
    +const ServiceName = "ec2"
    +
    +// New creates a new instance of the EC2 client with a session.
    +// If additional configuration is needed for the client instance use the optional
    +// aws.Config parameter to add your extra config.
    +//
    +// Example:
    +//     // Create an EC2 client from just a session.
    +//     svc := ec2.New(mySession)
    +//
    +//     // Create an EC2 client with additional configuration
    +//     svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
    +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 {
    +	c := p.ClientConfig(ServiceName, cfgs...)
    +	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
    +}
    +
    +// newClient creates, initializes and returns a new service client instance.
    +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *EC2 {
    +	svc := &EC2{
    +		Client: client.New(
    +			cfg,
    +			metadata.ClientInfo{
    +				ServiceName:   ServiceName,
    +				SigningRegion: signingRegion,
    +				Endpoint:      endpoint,
    +				APIVersion:    "2016-09-15",
    +			},
    +			handlers,
    +		),
    +	}
    +
    +	// Handlers
    +	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
    +	svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler)
    +	svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler)
    +	svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler)
    +	svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler)
    +
    +	// Run custom client initialization if present
    +	if initClient != nil {
    +		initClient(svc.Client)
    +	}
    +
    +	return svc
    +}
    +
    +// newRequest creates a new request for an EC2 operation and runs any
    +// custom request initialization.
    +func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request {
    +	req := c.NewRequest(op, params, data)
    +
    +	// Run custom request initialization if present
    +	if initRequest != nil {
    +		initRequest(req)
    +	}
    +
    +	return req
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
    new file mode 100644
    index 0000000..94fab6d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go
    @@ -0,0 +1,1027 @@
    +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
    +
    +package ec2
    +
    +import (
    +	"github.com/aws/aws-sdk-go/private/waiter"
    +)
    +
    +// WaitUntilBundleTaskComplete uses the Amazon EC2 API operation
    +// DescribeBundleTasks to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeBundleTasks",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "BundleTasks[].State",
    +				Expected: "complete",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "BundleTasks[].State",
    +				Expected: "failed",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilConversionTaskCancelled uses the Amazon EC2 API operation
    +// DescribeConversionTasks to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeConversionTasks",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "ConversionTasks[].State",
    +				Expected: "cancelled",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilConversionTaskCompleted uses the Amazon EC2 API operation
    +// DescribeConversionTasks to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeConversionTasks",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "ConversionTasks[].State",
    +				Expected: "completed",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "ConversionTasks[].State",
    +				Expected: "cancelled",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "ConversionTasks[].State",
    +				Expected: "cancelling",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilConversionTaskDeleted uses the Amazon EC2 API operation
    +// DescribeConversionTasks to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeConversionTasks",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "ConversionTasks[].State",
    +				Expected: "deleted",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilCustomerGatewayAvailable uses the Amazon EC2 API operation
    +// DescribeCustomerGateways to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeCustomerGateways",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "CustomerGateways[].State",
    +				Expected: "available",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "CustomerGateways[].State",
    +				Expected: "deleted",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "CustomerGateways[].State",
    +				Expected: "deleting",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilExportTaskCancelled uses the Amazon EC2 API operation
    +// DescribeExportTasks to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeExportTasks",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "ExportTasks[].State",
    +				Expected: "cancelled",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilExportTaskCompleted uses the Amazon EC2 API operation
    +// DescribeExportTasks to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeExportTasks",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "ExportTasks[].State",
    +				Expected: "completed",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilImageAvailable uses the Amazon EC2 API operation
    +// DescribeImages to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeImages",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Images[].State",
    +				Expected: "available",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Images[].State",
    +				Expected: "failed",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilImageExists uses the Amazon EC2 API operation
    +// DescribeImages to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeImages",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "path",
    +				Argument: "length(Images[]) > `0`",
    +				Expected: true,
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidAMIID.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilInstanceExists uses the Amazon EC2 API operation
    +// DescribeInstances to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeInstances",
    +		Delay:       5,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "path",
    +				Argument: "length(Reservations[]) > `0`",
    +				Expected: true,
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidInstanceID.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilInstanceRunning uses the Amazon EC2 API operation
    +// DescribeInstances to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeInstances",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "running",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "shutting-down",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "terminated",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "stopping",
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidInstanceID.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilInstanceStatusOk uses the Amazon EC2 API operation
    +// DescribeInstanceStatus to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeInstanceStatus",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "InstanceStatuses[].InstanceStatus.Status",
    +				Expected: "ok",
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidInstanceID.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilInstanceStopped uses the Amazon EC2 API operation
    +// DescribeInstances to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeInstances",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "stopped",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "pending",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "terminated",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilInstanceTerminated uses the Amazon EC2 API operation
    +// DescribeInstances to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeInstances",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "terminated",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "pending",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Reservations[].Instances[].State.Name",
    +				Expected: "stopping",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilKeyPairExists uses the Amazon EC2 API operation
    +// DescribeKeyPairs to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeKeyPairs",
    +		Delay:       5,
    +		MaxAttempts: 6,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "length(KeyPairs[].KeyName) > `0`",
    +				Expected: true,
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidKeyPair.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilNatGatewayAvailable uses the Amazon EC2 API operation
    +// DescribeNatGateways to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeNatGateways",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "NatGateways[].State",
    +				Expected: "available",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "NatGateways[].State",
    +				Expected: "failed",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "NatGateways[].State",
    +				Expected: "deleting",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "NatGateways[].State",
    +				Expected: "deleted",
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "NatGatewayNotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilNetworkInterfaceAvailable uses the Amazon EC2 API operation
    +// DescribeNetworkInterfaces to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeNetworkInterfaces",
    +		Delay:       20,
    +		MaxAttempts: 10,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "NetworkInterfaces[].Status",
    +				Expected: "available",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidNetworkInterfaceID.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilPasswordDataAvailable uses the Amazon EC2 API operation
    +// GetPasswordData to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "GetPasswordData",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "path",
    +				Argument: "length(PasswordData) > `0`",
    +				Expected: true,
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilSnapshotCompleted uses the Amazon EC2 API operation
    +// DescribeSnapshots to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeSnapshots",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Snapshots[].State",
    +				Expected: "completed",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilSpotInstanceRequestFulfilled uses the Amazon EC2 API operation
    +// DescribeSpotInstanceRequests to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeSpotInstanceRequests",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "SpotInstanceRequests[].Status.Code",
    +				Expected: "fulfilled",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "SpotInstanceRequests[].Status.Code",
    +				Expected: "schedule-expired",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "SpotInstanceRequests[].Status.Code",
    +				Expected: "canceled-before-fulfillment",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "SpotInstanceRequests[].Status.Code",
    +				Expected: "bad-parameters",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "SpotInstanceRequests[].Status.Code",
    +				Expected: "system-error",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilSubnetAvailable uses the Amazon EC2 API operation
    +// DescribeSubnets to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeSubnets",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Subnets[].State",
    +				Expected: "available",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilSystemStatusOk uses the Amazon EC2 API operation
    +// DescribeInstanceStatus to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeInstanceStatus",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "InstanceStatuses[].SystemStatus.Status",
    +				Expected: "ok",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVolumeAvailable uses the Amazon EC2 API operation
    +// DescribeVolumes to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVolumes",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Volumes[].State",
    +				Expected: "available",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Volumes[].State",
    +				Expected: "deleted",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVolumeDeleted uses the Amazon EC2 API operation
    +// DescribeVolumes to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVolumes",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Volumes[].State",
    +				Expected: "deleted",
    +			},
    +			{
    +				State:    "success",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidVolume.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVolumeInUse uses the Amazon EC2 API operation
    +// DescribeVolumes to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVolumes",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Volumes[].State",
    +				Expected: "in-use",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "Volumes[].State",
    +				Expected: "deleted",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVpcAvailable uses the Amazon EC2 API operation
    +// DescribeVpcs to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVpcs",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "Vpcs[].State",
    +				Expected: "available",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVpcExists uses the Amazon EC2 API operation
    +// DescribeVpcs to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVpcs",
    +		Delay:       1,
    +		MaxAttempts: 5,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "status",
    +				Argument: "",
    +				Expected: 200,
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidVpcID.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVpcPeeringConnectionExists uses the Amazon EC2 API operation
    +// DescribeVpcPeeringConnections to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVpcPeeringConnections",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "status",
    +				Argument: "",
    +				Expected: 200,
    +			},
    +			{
    +				State:    "retry",
    +				Matcher:  "error",
    +				Argument: "",
    +				Expected: "InvalidVpcPeeringConnectionID.NotFound",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVpnConnectionAvailable uses the Amazon EC2 API operation
    +// DescribeVpnConnections to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVpnConnections",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "VpnConnections[].State",
    +				Expected: "available",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "VpnConnections[].State",
    +				Expected: "deleting",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "VpnConnections[].State",
    +				Expected: "deleted",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    +
    +// WaitUntilVpnConnectionDeleted uses the Amazon EC2 API operation
    +// DescribeVpnConnections to wait for a condition to be met before returning.
    +// If the condition is not met within the max attempt window an error will
    +// be returned.
    +func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error {
    +	waiterCfg := waiter.Config{
    +		Operation:   "DescribeVpnConnections",
    +		Delay:       15,
    +		MaxAttempts: 40,
    +		Acceptors: []waiter.WaitAcceptor{
    +			{
    +				State:    "success",
    +				Matcher:  "pathAll",
    +				Argument: "VpnConnections[].State",
    +				Expected: "deleted",
    +			},
    +			{
    +				State:    "failure",
    +				Matcher:  "pathAny",
    +				Argument: "VpnConnections[].State",
    +				Expected: "pending",
    +			},
    +		},
    +	}
    +
    +	w := waiter.Waiter{
    +		Client: c,
    +		Input:  input,
    +		Config: waiterCfg,
    +	}
    +	return w.Wait()
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
    new file mode 100644
    index 0000000..7d4e143
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
    @@ -0,0 +1,2242 @@
    +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
    +
    +// Package sts provides a client for AWS Security Token Service.
    +package sts
    +
    +import (
    +	"time"
    +
    +	"github.com/aws/aws-sdk-go/aws/awsutil"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +)
    +
    +const opAssumeRole = "AssumeRole"
    +
    +// AssumeRoleRequest generates a "aws/request.Request" representing the
    +// client's request for the AssumeRole operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AssumeRole for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AssumeRole method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AssumeRoleRequest method.
    +//    req, resp := client.AssumeRoleRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
    +	op := &request.Operation{
    +		Name:       opAssumeRole,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AssumeRoleInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AssumeRoleOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AssumeRole API operation for AWS Security Token Service.
    +//
    +// Returns a set of temporary security credentials (consisting of an access
    +// key ID, a secret access key, and a security token) that you can use to access
    +// AWS resources that you might not normally have access to. Typically, you
    +// use AssumeRole for cross-account access or federation. For a comparison of
    +// AssumeRole with the other APIs that produce temporary credentials, see Requesting
    +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
    +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
    +// in the IAM User Guide.
    +//
    +// Important: You cannot call AssumeRole by using AWS root account credentials;
    +// access is denied. You must use credentials for an IAM user or an IAM role
    +// to call AssumeRole.
    +//
    +// For cross-account access, imagine that you own multiple accounts and need
    +// to access resources in each account. You could create long-term credentials
    +// in each account to access those resources. However, managing all those credentials
    +// and remembering which one can access which account can be time consuming.
    +// Instead, you can create one set of long-term credentials in one account and
    +// then use temporary security credentials to access all the other accounts
    +// by assuming roles in those accounts. For more information about roles, see
    +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
    +// in the IAM User Guide.
    +//
    +// For federation, you can, for example, grant single sign-on access to the
    +// AWS Management Console. If you already have an identity and authentication
    +// system in your corporate network, you don't have to recreate user identities
    +// in AWS in order to grant those user identities access to AWS. Instead, after
    +// a user has been authenticated, you call AssumeRole (and specify the role
    +// with the appropriate permissions) to get temporary security credentials for
    +// that user. With those temporary security credentials, you construct a sign-in
    +// URL that users can use to access the console. For more information, see Common
    +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
    +// in the IAM User Guide.
    +//
    +// The temporary security credentials are valid for the duration that you specified
    +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
    +// maximum of 3600 seconds (1 hour). The default is 1 hour.
    +//
    +// The temporary security credentials created by AssumeRole can be used to make
    +// API calls to any AWS service with the following exception: you cannot call
    +// the STS service's GetFederationToken or GetSessionToken APIs.
    +//
    +// Optionally, you can pass an IAM access policy to this operation. If you choose
    +// not to pass a policy, the temporary security credentials that are returned
    +// by the operation have the permissions that are defined in the access policy
    +// of the role that is being assumed. If you pass a policy to this operation,
    +// the temporary security credentials that are returned by the operation have
    +// the permissions that are allowed by both the access policy of the role that
    +// is being assumed, and the policy that you pass. This gives you a way to further
    +// restrict the permissions for the resulting temporary security credentials.
    +// You cannot use the passed policy to grant permissions that are in excess
    +// of those allowed by the access policy of the role that is being assumed.
    +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
    +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
    +// in the IAM User Guide.
    +//
    +// To assume a role, your AWS account must be trusted by the role. The trust
    +// relationship is defined in the role's trust policy when the role is created.
    +// That trust policy states which accounts are allowed to delegate access to
    +// this account's role.
    +//
    +// The user who wants to access the role must also have permissions delegated
    +// from the role's administrator. If the user is in a different account than
    +// the role, then the user's administrator must attach a policy that allows
    +// the user to call AssumeRole on the ARN of the role in the other account.
    +// If the user is in the same account as the role, then you can either attach
    +// a policy to the user (identical to the previous different account user),
    +// or you can add the user as a principal directly in the role's trust policy.
    +//
    +// Using MFA with AssumeRole
    +//
    +// You can optionally include multi-factor authentication (MFA) information
    +// when you call AssumeRole. This is useful for cross-account scenarios in which
    +// you want to make sure that the user who is assuming the role has been authenticated
    +// using an AWS MFA device. In that scenario, the trust policy of the role being
    +// assumed includes a condition that tests for MFA authentication; if the caller
    +// does not include valid MFA information, the request to assume the role is
    +// denied. The condition in a trust policy that tests for MFA authentication
    +// might look like the following example.
    +//
    +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
    +//
    +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
    +// in the IAM User Guide guide.
    +//
    +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
    +// parameters. The SerialNumber value identifies the user's hardware or virtual
    +// MFA device. The TokenCode is the time-based one-time password (TOTP) that
    +// the MFA device produces.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for AWS Security Token Service's
    +// API operation AssumeRole for usage and error information.
    +//
    +// Returned Error Codes:
    +//   * MalformedPolicyDocument
    +//   The request was rejected because the policy document was malformed. The error
    +//   message describes the specific error.
    +//
    +//   * PackedPolicyTooLarge
    +//   The request was rejected because the policy document was too large. The error
    +//   message describes how big the policy document is, in packed form, as a percentage
    +//   of what the API allows.
    +//
    +//   * RegionDisabledException
    +//   STS is not activated in the requested region for the account that is being
    +//   asked to generate credentials. The account administrator must use the IAM
    +//   console to activate STS in that region. For more information, see Activating
    +//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
    +//   in the IAM User Guide.
    +//
    +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
    +	req, out := c.AssumeRoleRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
    +
    +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
    +// client's request for the AssumeRoleWithSAML operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AssumeRoleWithSAML for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AssumeRoleWithSAML method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AssumeRoleWithSAMLRequest method.
    +//    req, resp := client.AssumeRoleWithSAMLRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
    +	op := &request.Operation{
    +		Name:       opAssumeRoleWithSAML,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AssumeRoleWithSAMLInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AssumeRoleWithSAMLOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AssumeRoleWithSAML API operation for AWS Security Token Service.
    +//
    +// Returns a set of temporary security credentials for users who have been authenticated
    +// via a SAML authentication response. This operation provides a mechanism for
    +// tying an enterprise identity store or directory to role-based AWS access
    +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
    +// with the other APIs that produce temporary credentials, see Requesting Temporary
    +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
    +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
    +// in the IAM User Guide.
    +//
    +// The temporary security credentials returned by this operation consist of
    +// an access key ID, a secret access key, and a security token. Applications
    +// can use these temporary security credentials to sign calls to AWS services.
    +//
    +// The temporary security credentials are valid for the duration that you specified
    +// when calling AssumeRole, or until the time specified in the SAML authentication
    +// response's SessionNotOnOrAfter value, whichever is shorter. The duration
    +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
    +// The default is 1 hour.
    +//
    +// The temporary security credentials created by AssumeRoleWithSAML can be used
    +// to make API calls to any AWS service with the following exception: you cannot
    +// call the STS service's GetFederationToken or GetSessionToken APIs.
    +//
    +// Optionally, you can pass an IAM access policy to this operation. If you choose
    +// not to pass a policy, the temporary security credentials that are returned
    +// by the operation have the permissions that are defined in the access policy
    +// of the role that is being assumed. If you pass a policy to this operation,
    +// the temporary security credentials that are returned by the operation have
    +// the permissions that are allowed by the intersection of both the access policy
    +// of the role that is being assumed, and the policy that you pass. This means
    +// that both policies must grant the permission for the action to be allowed.
    +// This gives you a way to further restrict the permissions for the resulting
    +// temporary security credentials. You cannot use the passed policy to grant
    +// permissions that are in excess of those allowed by the access policy of the
    +// role that is being assumed. For more information, see Permissions for AssumeRole,
    +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
    +// in the IAM User Guide.
    +//
    +// Before your application can call AssumeRoleWithSAML, you must configure your
    +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
    +// you must use AWS Identity and Access Management (IAM) to create a SAML provider
    +// entity in your AWS account that represents your identity provider, and create
    +// an IAM role that specifies this SAML provider in its trust policy.
    +//
    +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
    +// The identity of the caller is validated by using keys in the metadata document
    +// that is uploaded for the SAML provider entity for your identity provider.
    +//
    +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
    +// logs. The entry includes the value in the NameID element of the SAML assertion.
    +// We recommend that you use a NameIDType that is not associated with any personally
    +// identifiable information (PII). For example, you could instead use the Persistent
    +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
    +//
    +// For more information, see the following resources:
    +//
    +//    * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
    +//    in the IAM User Guide.
    +//
    +//    * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
    +//    in the IAM User Guide.
    +//
    +//    * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
    +//    in the IAM User Guide.
    +//
    +//    * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
    +//    in the IAM User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for AWS Security Token Service's
    +// API operation AssumeRoleWithSAML for usage and error information.
    +//
    +// Returned Error Codes:
    +//   * MalformedPolicyDocument
    +//   The request was rejected because the policy document was malformed. The error
    +//   message describes the specific error.
    +//
    +//   * PackedPolicyTooLarge
    +//   The request was rejected because the policy document was too large. The error
    +//   message describes how big the policy document is, in packed form, as a percentage
    +//   of what the API allows.
    +//
    +//   * IDPRejectedClaim
    +//   The identity provider (IdP) reported that authentication failed. This might
    +//   be because the claim is invalid.
    +//
    +//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
    +//   can also mean that the claim has expired or has been explicitly revoked.
    +//
    +//   * InvalidIdentityToken
    +//   The web identity token that was passed could not be validated by AWS. Get
    +//   a new identity token from the identity provider and then retry the request.
    +//
    +//   * ExpiredTokenException
    +//   The web identity token that was passed is expired or is not valid. Get a
    +//   new identity token from the identity provider and then retry the request.
    +//
    +//   * RegionDisabledException
    +//   STS is not activated in the requested region for the account that is being
    +//   asked to generate credentials. The account administrator must use the IAM
    +//   console to activate STS in that region. For more information, see Activating
    +//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
    +//   in the IAM User Guide.
    +//
    +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
    +	req, out := c.AssumeRoleWithSAMLRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
    +
    +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
    +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See AssumeRoleWithWebIdentity for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the AssumeRoleWithWebIdentity method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
    +//    req, resp := client.AssumeRoleWithWebIdentityRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
    +	op := &request.Operation{
    +		Name:       opAssumeRoleWithWebIdentity,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &AssumeRoleWithWebIdentityInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &AssumeRoleWithWebIdentityOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
    +//
    +// Returns a set of temporary security credentials for users who have been authenticated
    +// in a mobile or web application with a web identity provider, such as Amazon
    +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
    +// identity provider.
    +//
    +// For mobile applications, we recommend that you use Amazon Cognito. You can
    +// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
    +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
    +// identify a user and supply the user with a consistent identity throughout
    +// the lifetime of an application.
    +//
    +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
    +// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview
    +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
    +// in the AWS SDK for iOS Developer Guide.
    +//
    +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
    +// credentials. Therefore, you can distribute an application (for example, on
    +// mobile devices) that requests temporary security credentials without including
    +// long-term AWS credentials in the application, and without deploying server-based
    +// proxy services that use long-term AWS credentials. Instead, the identity
    +// of the caller is validated by using a token from the web identity provider.
    +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
    +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
    +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
    +// in the IAM User Guide.
    +//
    +// The temporary security credentials returned by this API consist of an access
    +// key ID, a secret access key, and a security token. Applications can use these
    +// temporary security credentials to sign calls to AWS service APIs.
    +//
    +// The credentials are valid for the duration that you specified when calling
    +// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
    +// a maximum of 3600 seconds (1 hour). The default is 1 hour.
    +//
    +// The temporary security credentials created by AssumeRoleWithWebIdentity can
    +// be used to make API calls to any AWS service with the following exception:
    +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
    +//
    +// Optionally, you can pass an IAM access policy to this operation. If you choose
    +// not to pass a policy, the temporary security credentials that are returned
    +// by the operation have the permissions that are defined in the access policy
    +// of the role that is being assumed. If you pass a policy to this operation,
    +// the temporary security credentials that are returned by the operation have
    +// the permissions that are allowed by both the access policy of the role that
    +// is being assumed, and the policy that you pass. This gives you a way to further
    +// restrict the permissions for the resulting temporary security credentials.
    +// You cannot use the passed policy to grant permissions that are in excess
    +// of those allowed by the access policy of the role that is being assumed.
    +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
    +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
    +// in the IAM User Guide.
    +//
    +// Before your application can call AssumeRoleWithWebIdentity, you must have
    +// an identity token from a supported identity provider and create a role that
    +// the application can assume. The role that your application assumes must trust
    +// the identity provider that is associated with the identity token. In other
    +// words, the identity provider must be specified in the role's trust policy.
    +//
    +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
    +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
    +// of the provided Web Identity Token. We recommend that you avoid using any
    +// personally identifiable information (PII) in this field. For example, you
    +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
    +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
    +//
    +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
    +// API, see the following resources:
    +//
    +//    * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
    +//    and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
    +//
    +//
    +//    *  Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
    +//    This interactive website lets you walk through the process of authenticating
    +//    via Login with Amazon, Facebook, or Google, getting temporary security
    +//    credentials, and then using those credentials to make a request to AWS.
    +//
    +//
    +//    * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
    +//    (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
    +//    apps that show how to invoke the identity providers, and then how to use
    +//    the information from these providers to get and use temporary security
    +//    credentials.
    +//
    +//    * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
    +//    This article discusses web identity federation and shows an example of
    +//    how to use web identity federation to get access to content in Amazon
    +//    S3.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for AWS Security Token Service's
    +// API operation AssumeRoleWithWebIdentity for usage and error information.
    +//
    +// Returned Error Codes:
    +//   * MalformedPolicyDocument
    +//   The request was rejected because the policy document was malformed. The error
    +//   message describes the specific error.
    +//
    +//   * PackedPolicyTooLarge
    +//   The request was rejected because the policy document was too large. The error
    +//   message describes how big the policy document is, in packed form, as a percentage
    +//   of what the API allows.
    +//
    +//   * IDPRejectedClaim
    +//   The identity provider (IdP) reported that authentication failed. This might
    +//   be because the claim is invalid.
    +//
    +//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
    +//   can also mean that the claim has expired or has been explicitly revoked.
    +//
    +//   * IDPCommunicationError
    +//   The request could not be fulfilled because the non-AWS identity provider
    +//   (IDP) that was asked to verify the incoming identity token could not be reached.
    +//   This is often a transient error caused by network conditions. Retry the request
    +//   a limited number of times so that you don't exceed the request rate. If the
    +//   error persists, the non-AWS identity provider might be down or not responding.
    +//
    +//   * InvalidIdentityToken
    +//   The web identity token that was passed could not be validated by AWS. Get
    +//   a new identity token from the identity provider and then retry the request.
    +//
    +//   * ExpiredTokenException
    +//   The web identity token that was passed is expired or is not valid. Get a
    +//   new identity token from the identity provider and then retry the request.
    +//
    +//   * RegionDisabledException
    +//   STS is not activated in the requested region for the account that is being
    +//   asked to generate credentials. The account administrator must use the IAM
    +//   console to activate STS in that region. For more information, see Activating
    +//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
    +//   in the IAM User Guide.
    +//
    +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
    +	req, out := c.AssumeRoleWithWebIdentityRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
    +
    +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
    +// client's request for the DecodeAuthorizationMessage operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See DecodeAuthorizationMessage for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the DecodeAuthorizationMessage method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the DecodeAuthorizationMessageRequest method.
    +//    req, resp := client.DecodeAuthorizationMessageRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
    +	op := &request.Operation{
    +		Name:       opDecodeAuthorizationMessage,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &DecodeAuthorizationMessageInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &DecodeAuthorizationMessageOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// DecodeAuthorizationMessage API operation for AWS Security Token Service.
    +//
    +// Decodes additional information about the authorization status of a request
    +// from an encoded message returned in response to an AWS request.
    +//
    +// For example, if a user is not authorized to perform an action that he or
    +// she has requested, the request returns a Client.UnauthorizedOperation response
    +// (an HTTP 403 response). Some AWS actions additionally return an encoded message
    +// that can provide details about this authorization failure.
    +//
    +// Only certain AWS actions return an encoded authorization message. The documentation
    +// for an individual action indicates whether that action returns an encoded
    +// message in addition to returning an HTTP code.
    +//
    +// The message is encoded because the details of the authorization status can
    +// constitute privileged information that the user who requested the action
    +// should not see. To decode an authorization status message, a user must be
    +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
    +// (sts:DecodeAuthorizationMessage) action.
    +//
    +// The decoded message includes the following type of information:
    +//
    +//    * Whether the request was denied due to an explicit deny or due to the
    +//    absence of an explicit allow. For more information, see Determining Whether
    +//    a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
    +//    in the IAM User Guide.
    +//
    +//    * The principal who made the request.
    +//
    +//    * The requested action.
    +//
    +//    * The requested resource.
    +//
    +//    * The values of condition keys in the context of the user's request.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for AWS Security Token Service's
    +// API operation DecodeAuthorizationMessage for usage and error information.
    +//
    +// Returned Error Codes:
    +//   * InvalidAuthorizationMessageException
    +//   The error returned if the message passed to DecodeAuthorizationMessage was
    +//   invalid. This can happen if the token contains invalid characters, such as
    +//   linebreaks.
    +//
    +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
    +	req, out := c.DecodeAuthorizationMessageRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opGetCallerIdentity = "GetCallerIdentity"
    +
    +// GetCallerIdentityRequest generates a "aws/request.Request" representing the
    +// client's request for the GetCallerIdentity operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetCallerIdentity for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetCallerIdentity method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetCallerIdentityRequest method.
    +//    req, resp := client.GetCallerIdentityRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
    +	op := &request.Operation{
    +		Name:       opGetCallerIdentity,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetCallerIdentityInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &GetCallerIdentityOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetCallerIdentity API operation for AWS Security Token Service.
    +//
    +// Returns details about the IAM identity whose credentials are used to call
    +// the API.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for AWS Security Token Service's
    +// API operation GetCallerIdentity for usage and error information.
    +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
    +	req, out := c.GetCallerIdentityRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opGetFederationToken = "GetFederationToken"
    +
    +// GetFederationTokenRequest generates a "aws/request.Request" representing the
    +// client's request for the GetFederationToken operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetFederationToken for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetFederationToken method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetFederationTokenRequest method.
    +//    req, resp := client.GetFederationTokenRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
    +	op := &request.Operation{
    +		Name:       opGetFederationToken,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetFederationTokenInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &GetFederationTokenOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetFederationToken API operation for AWS Security Token Service.
    +//
    +// Returns a set of temporary security credentials (consisting of an access
    +// key ID, a secret access key, and a security token) for a federated user.
    +// A typical use is in a proxy application that gets temporary security credentials
    +// on behalf of distributed applications inside a corporate network. Because
    +// you must call the GetFederationToken action using the long-term security
    +// credentials of an IAM user, this call is appropriate in contexts where those
    +// credentials can be safely stored, usually in a server-based application.
    +// For a comparison of GetFederationToken with the other APIs that produce temporary
    +// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
    +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
    +// in the IAM User Guide.
    +//
    +// If you are creating a mobile-based or browser-based app that can authenticate
    +// users using a web identity provider like Login with Amazon, Facebook, Google,
    +// or an OpenID Connect-compatible identity provider, we recommend that you
    +// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
    +// For more information, see Federation Through a Web-based Identity Provider
    +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
    +//
    +// The GetFederationToken action must be called by using the long-term AWS security
    +// credentials of an IAM user. You can also call GetFederationToken using the
    +// security credentials of an AWS root account, but we do not recommend it.
    +// Instead, we recommend that you create an IAM user for the purpose of the
    +// proxy application and then attach a policy to the IAM user that limits federated
    +// users to only the actions and resources that they need access to. For more
    +// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
    +// in the IAM User Guide.
    +//
    +// The temporary security credentials that are obtained by using the long-term
    +// credentials of an IAM user are valid for the specified duration, from 900
    +// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
    +// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
    +// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
    +//
    +// The temporary security credentials created by GetFederationToken can be used
    +// to make API calls to any AWS service with the following exceptions:
    +//
    +//    * You cannot use these credentials to call any IAM APIs.
    +//
    +//    * You cannot call any STS APIs.
    +//
    +// Permissions
    +//
    +// The permissions for the temporary security credentials returned by GetFederationToken
    +// are determined by a combination of the following:
    +//
    +//    * The policy or policies that are attached to the IAM user whose credentials
    +//    are used to call GetFederationToken.
    +//
    +//    * The policy that is passed as a parameter in the call.
    +//
    +// The passed policy is attached to the temporary security credentials that
    +// result from the GetFederationToken API call--that is, to the federated user.
    +// When the federated user makes an AWS request, AWS evaluates the policy attached
    +// to the federated user in combination with the policy or policies attached
    +// to the IAM user whose credentials were used to call GetFederationToken. AWS
    +// allows the federated user's request only when both the federated user and
    +// the IAM user are explicitly allowed to perform the requested action. The
    +// passed policy cannot grant more permissions than those that are defined in
    +// the IAM user policy.
    +//
    +// A typical use case is that the permissions of the IAM user whose credentials
    +// are used to call GetFederationToken are designed to allow access to all the
    +// actions and resources that any federated user will need. Then, for individual
    +// users, you pass a policy to the operation that scopes down the permissions
    +// to a level that's appropriate to that individual user, using a policy that
    +// allows only a subset of permissions that are granted to the IAM user.
    +//
    +// If you do not pass a policy, the resulting temporary security credentials
    +// have no effective permissions. The only exception is when the temporary security
    +// credentials are used to access a resource that has a resource-based policy
    +// that specifically allows the federated user to access the resource.
    +//
    +// For more information about how permissions work, see Permissions for GetFederationToken
    +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
    +// For information about using GetFederationToken to create temporary security
    +// credentials, see GetFederationToken—Federation Through a Custom Identity
    +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for AWS Security Token Service's
    +// API operation GetFederationToken for usage and error information.
    +//
    +// Returned Error Codes:
    +//   * MalformedPolicyDocument
    +//   The request was rejected because the policy document was malformed. The error
    +//   message describes the specific error.
    +//
    +//   * PackedPolicyTooLarge
    +//   The request was rejected because the policy document was too large. The error
    +//   message describes how big the policy document is, in packed form, as a percentage
    +//   of what the API allows.
    +//
    +//   * RegionDisabledException
    +//   STS is not activated in the requested region for the account that is being
    +//   asked to generate credentials. The account administrator must use the IAM
    +//   console to activate STS in that region. For more information, see Activating
    +//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
    +//   in the IAM User Guide.
    +//
    +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
    +	req, out := c.GetFederationTokenRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +const opGetSessionToken = "GetSessionToken"
    +
    +// GetSessionTokenRequest generates a "aws/request.Request" representing the
    +// client's request for the GetSessionToken operation. The "output" return
    +// value can be used to capture response data after the request's "Send" method
    +// is called.
    +//
    +// See GetSessionToken for usage and error information.
    +//
    +// Creating a request object using this method should be used when you want to inject
    +// custom logic into the request's lifecycle using a custom handler, or if you want to
    +// access properties on the request object before or after sending the request. If
    +// you just want the service response, call the GetSessionToken method directly
    +// instead.
    +//
    +// Note: You must call the "Send" method on the returned request object in order
    +// to execute the request.
    +//
    +//    // Example sending a request using the GetSessionTokenRequest method.
    +//    req, resp := client.GetSessionTokenRequest(params)
    +//
    +//    err := req.Send()
    +//    if err == nil { // resp is now filled
    +//        fmt.Println(resp)
    +//    }
    +//
    +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
    +	op := &request.Operation{
    +		Name:       opGetSessionToken,
    +		HTTPMethod: "POST",
    +		HTTPPath:   "/",
    +	}
    +
    +	if input == nil {
    +		input = &GetSessionTokenInput{}
    +	}
    +
    +	req = c.newRequest(op, input, output)
    +	output = &GetSessionTokenOutput{}
    +	req.Data = output
    +	return
    +}
    +
    +// GetSessionToken API operation for AWS Security Token Service.
    +//
    +// Returns a set of temporary credentials for an AWS account or IAM user. The
    +// credentials consist of an access key ID, a secret access key, and a security
    +// token. Typically, you use GetSessionToken if you want to use MFA to protect
    +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
    +// IAM users would need to call GetSessionToken and submit an MFA code that
    +// is associated with their MFA device. Using the temporary security credentials
    +// that are returned from the call, IAM users can then make programmatic calls
    +// to APIs that require MFA authentication. If you do not supply a correct MFA
    +// code, then the API returns an access denied error. For a comparison of GetSessionToken
    +// with the other APIs that produce temporary credentials, see Requesting Temporary
    +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
    +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
    +// in the IAM User Guide.
    +//
    +// The GetSessionToken action must be called by using the long-term AWS security
    +// credentials of the AWS account or an IAM user. Credentials that are created
    +// by IAM users are valid for the duration that you specify, from 900 seconds
    +// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
    +// of 43200 seconds (12 hours); credentials that are created by using account
    +// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
    +// seconds (1 hour), with a default of 1 hour.
    +//
    +// The temporary security credentials created by GetSessionToken can be used
    +// to make API calls to any AWS service with the following exceptions:
    +//
    +//    * You cannot call any IAM APIs unless MFA authentication information is
    +//    included in the request.
    +//
    +//    * You cannot call any STS API except AssumeRole.
    +//
    +// We recommend that you do not call GetSessionToken with root account credentials.
    +// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
    +// by creating one or more IAM users, giving them the necessary permissions,
    +// and using IAM users for everyday interaction with AWS.
    +//
    +// The permissions associated with the temporary security credentials returned
    +// by GetSessionToken are based on the permissions associated with the account or
    +// IAM user whose credentials are used to call the action. If GetSessionToken
    +// is called using root account credentials, the temporary credentials have
    +// root account permissions. Similarly, if GetSessionToken is called using the
    +// credentials of an IAM user, the temporary credentials have the same permissions
    +// as the IAM user.
    +//
    +// For more information about using GetSessionToken to create temporary credentials,
    +// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
    +// in the IAM User Guide.
    +//
    +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
    +// with awserr.Error's Code and Message methods to get detailed information about
    +// the error.
    +//
    +// See the AWS API reference guide for AWS Security Token Service's
    +// API operation GetSessionToken for usage and error information.
    +//
    +// Returned Error Codes:
    +//   * RegionDisabledException
    +//   STS is not activated in the requested region for the account that is being
    +//   asked to generate credentials. The account administrator must use the IAM
    +//   console to activate STS in that region. For more information, see Activating
    +//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
    +//   in the IAM User Guide.
    +//
    +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
    +	req, out := c.GetSessionTokenRequest(input)
    +	err := req.Send()
    +	return out, err
    +}
    +
    +type AssumeRoleInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The duration, in seconds, of the role session. The value can range from 900
    +	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
    +	// to 3600 seconds.
    +	//
    +	// This is separate from the duration of a console session that you might request
    +	// using the returned credentials. The request to the federation endpoint for
    +	// a console sign-in token takes a SessionDuration parameter that specifies
    +	// the maximum length of the console session, separately from the DurationSeconds
    +	// parameter on this API. For more information, see Creating a URL that Enables
    +	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
    +	// in the IAM User Guide.
    +	DurationSeconds *int64 `min:"900" type:"integer"`
    +
    +	// A unique identifier that is used by third parties when assuming roles in
    +	// their customers' accounts. For each role that the third party can assume,
    +	// they should instruct their customers to ensure the role's trust policy checks
    +	// for the external ID that the third party generated. Each time the third party
    +	// assumes the role, they should pass the customer's external ID. The external
    +	// ID is useful in order to help third parties bind a role to the customer who
    +	// created it. For more information about the external ID, see How to Use an
    +	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
    +	// in the IAM User Guide.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a string
    +	// of characters consisting of upper- and lower-case alphanumeric characters
    +	// with no spaces. You can also include underscores or any of the following
    +	// characters: =,.@:\/-
    +	ExternalId *string `min:"2" type:"string"`
    +
    +	// An IAM policy in JSON format.
    +	//
    +	// This parameter is optional. If you pass a policy, the temporary security
    +	// credentials that are returned by the operation have the permissions that
    +	// are allowed by both (the intersection of) the access policy of the role that
    +	// is being assumed, and the policy that you pass. This gives you a way to further
    +	// restrict the permissions for the resulting temporary security credentials.
    +	// You cannot use the passed policy to grant permissions that are in excess
    +	// of those allowed by the access policy of the role that is being assumed.
    +	// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
    +	// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
    +	// in the IAM User Guide.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a string
    +	// of characters up to 2048 characters in length. The characters can be any
    +	// ASCII character from the space character to the end of the valid character
    +	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
    +	// and carriage return (\u000D) characters.
    +	//
    +	// The policy plain text must be 2048 bytes or shorter. However, an internal
    +	// conversion compresses it into a packed binary format with a separate limit.
    +	// The PackedPolicySize response element indicates by percentage how close to
    +	// the upper size limit the policy is, with 100% equaling the maximum allowed
    +	// size.
    +	Policy *string `min:"1" type:"string"`
    +
    +	// The Amazon Resource Name (ARN) of the role to assume.
    +	//
    +	// RoleArn is a required field
    +	RoleArn *string `min:"20" type:"string" required:"true"`
    +
    +	// An identifier for the assumed role session.
    +	//
    +	// Use the role session name to uniquely identify a session when the same role
    +	// is assumed by different principals or for different reasons. In cross-account
    +	// scenarios, the role session name is visible to, and can be logged by the
    +	// account that owns the role. The role session name is also used in the ARN
    +	// of the assumed role principal. This means that subsequent cross-account API
    +	// requests using the temporary security credentials will expose the role session
    +	// name to the external account in their CloudTrail logs.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a string
    +	// of characters consisting of upper- and lower-case alphanumeric characters
    +	// with no spaces. You can also include underscores or any of the following
    +	// characters: =,.@-
    +	//
    +	// RoleSessionName is a required field
    +	RoleSessionName *string `min:"2" type:"string" required:"true"`
    +
    +	// The identification number of the MFA device that is associated with the user
    +	// who is making the AssumeRole call. Specify this value if the trust policy
    +	// of the role being assumed includes a condition that requires MFA authentication.
    +	// The value is either the serial number for a hardware device (such as GAHT12345678)
    +	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a string
    +	// of characters consisting of upper- and lower-case alphanumeric characters
    +	// with no spaces. You can also include underscores or any of the following
    +	// characters: =,.@-
    +	SerialNumber *string `min:"9" type:"string"`
    +
    +	// The value provided by the MFA device, if the trust policy of the role being
    +	// assumed requires MFA (that is, if the policy includes a condition that tests
    +	// for MFA). If the role being assumed requires MFA and if the TokenCode value
    +	// is missing or expired, the AssumeRole call returns an "access denied" error.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a sequence
    +	// of six numeric digits.
    +	TokenCode *string `min:"6" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s AssumeRoleInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssumeRoleInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AssumeRoleInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
    +	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
    +		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
    +	}
    +	if s.ExternalId != nil && len(*s.ExternalId) < 2 {
    +		invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
    +	}
    +	if s.Policy != nil && len(*s.Policy) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
    +	}
    +	if s.RoleArn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
    +	}
    +	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
    +		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
    +	}
    +	if s.RoleSessionName == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
    +	}
    +	if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
    +		invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
    +	}
    +	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
    +		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
    +	}
    +	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
    +		invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDurationSeconds sets the DurationSeconds field's value.
    +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
    +	s.DurationSeconds = &v
    +	return s
    +}
    +
    +// SetExternalId sets the ExternalId field's value.
    +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
    +	s.ExternalId = &v
    +	return s
    +}
    +
    +// SetPolicy sets the Policy field's value.
    +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
    +	s.Policy = &v
    +	return s
    +}
    +
    +// SetRoleArn sets the RoleArn field's value.
    +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
    +	s.RoleArn = &v
    +	return s
    +}
    +
    +// SetRoleSessionName sets the RoleSessionName field's value.
    +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
    +	s.RoleSessionName = &v
    +	return s
    +}
    +
    +// SetSerialNumber sets the SerialNumber field's value.
    +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
    +	s.SerialNumber = &v
    +	return s
    +}
    +
    +// SetTokenCode sets the TokenCode field's value.
    +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
    +	s.TokenCode = &v
    +	return s
    +}
    +
// Contains the response to a successful AssumeRole request, including temporary
// AWS credentials that can be used to make AWS requests.
type AssumeRoleOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
	// that you can use to refer to the resulting temporary security credentials
	// (e.g. as a principal in a resource-based policy). Both include the
	// RoleSessionName that was specified in the AssumeRole call.
	AssumedRoleUser *AssumedRoleUser `type:"structure"`

	// The temporary security credentials: access key ID, secret access key, and
	// security (session) token.
	//
	// Note: The size of the security token that STS APIs return is not fixed.
	// Make no assumptions about the maximum size; as of this writing the typical
	// size is less than 4096 bytes, but that can vary and may grow in future.
	Credentials *Credentials `type:"structure"`

	// A percentage value indicating how much of the allowed packed-policy space
	// the request's policy consumed. The service rejects any policy with a packed
	// size greater than 100 percent.
	PackedPolicySize *int64 `type:"integer"`
}
    +
    +// String returns the string representation
    +func (s AssumeRoleOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssumeRoleOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssumedRoleUser sets the AssumedRoleUser field's value.
    +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
    +	s.AssumedRoleUser = v
    +	return s
    +}
    +
    +// SetCredentials sets the Credentials field's value.
    +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
    +	s.Credentials = v
    +	return s
    +}
    +
    +// SetPackedPolicySize sets the PackedPolicySize field's value.
    +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
    +	s.PackedPolicySize = &v
    +	return s
    +}
    +
// AssumeRoleWithSAMLInput carries the parameters for an AssumeRoleWithSAML call.
type AssumeRoleWithSAMLInput struct {
	_ struct{} `type:"structure"`

	// The duration, in seconds, of the role session: 900 (15 minutes) to 3600
	// (1 hour), default 3600. An expiration can also be specified in the SAML
	// authentication response's SessionNotOnOrAfter value; the shorter of the
	// two wins.
	//
	// This is separate from the duration of a console session that might be
	// requested with the returned credentials via the federation endpoint's
	// SessionDuration parameter. See Enabling SAML 2.0 Federated Users to Access
	// the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
	// in the IAM User Guide.
	DurationSeconds *int64 `min:"900" type:"integer"`

	// An optional IAM policy in JSON format. When passed, the returned temporary
	// credentials have the intersection of the assumed role's access policy and
	// this policy — it can only restrict, never expand, permissions. See
	// Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
	// in the IAM User Guide.
	//
	// Format: up to 2048 characters from \u0020-\u00FF plus tab (\u0009),
	// linefeed (\u000A), and carriage return (\u000D).
	//
	// The plain text must be 2048 bytes or shorter; an internal conversion packs
	// it into a binary format with a separate limit. The PackedPolicySize
	// response element reports, as a percentage, how close the policy is to that
	// upper limit (100% = maximum allowed size).
	Policy *string `min:"1" type:"string"`

	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
	// the IdP.
	//
	// PrincipalArn is a required field
	PrincipalArn *string `min:"20" type:"string" required:"true"`

	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
	//
	// RoleArn is a required field
	RoleArn *string `min:"20" type:"string" required:"true"`

	// The base-64 encoded SAML authentication response provided by the IdP.
	//
	// For more information, see Configuring a Relying Party and Adding Claims
	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
	// in the Using IAM guide.
	//
	// SAMLAssertion is a required field
	SAMLAssertion *string `min:"4" type:"string" required:"true"`
}
    +
    +// String returns the string representation
    +func (s AssumeRoleWithSAMLInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssumeRoleWithSAMLInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AssumeRoleWithSAMLInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
    +	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
    +		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
    +	}
    +	if s.Policy != nil && len(*s.Policy) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
    +	}
    +	if s.PrincipalArn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
    +	}
    +	if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
    +		invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
    +	}
    +	if s.RoleArn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
    +	}
    +	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
    +		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
    +	}
    +	if s.SAMLAssertion == nil {
    +		invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
    +	}
    +	if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
    +		invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDurationSeconds sets the DurationSeconds field's value.
    +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
    +	s.DurationSeconds = &v
    +	return s
    +}
    +
    +// SetPolicy sets the Policy field's value.
    +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
    +	s.Policy = &v
    +	return s
    +}
    +
    +// SetPrincipalArn sets the PrincipalArn field's value.
    +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
    +	s.PrincipalArn = &v
    +	return s
    +}
    +
    +// SetRoleArn sets the RoleArn field's value.
    +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
    +	s.RoleArn = &v
    +	return s
    +}
    +
    +// SetSAMLAssertion sets the SAMLAssertion field's value.
    +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
    +	s.SAMLAssertion = &v
    +	return s
    +}
    +
// Contains the response to a successful AssumeRoleWithSAML request, including
// temporary AWS credentials that can be used to make AWS requests.
type AssumeRoleWithSAMLOutput struct {
	_ struct{} `type:"structure"`

	// The identifiers for the temporary security credentials that the operation
	// returns.
	AssumedRoleUser *AssumedRoleUser `type:"structure"`

	// The value of the Recipient attribute of the SubjectConfirmationData element
	// of the SAML assertion.
	Audience *string `type:"string"`

	// The temporary security credentials: access key ID, secret access key, and
	// security (session) token.
	//
	// Note: The size of the security token that STS APIs return is not fixed.
	// Make no assumptions about the maximum size; as of this writing the typical
	// size is less than 4096 bytes, but that can vary and may grow in future.
	Credentials *Credentials `type:"structure"`

	// The value of the Issuer element of the SAML assertion.
	Issuer *string `type:"string"`

	// A hash value based on the concatenation of the Issuer response value, the
	// AWS account ID, and the friendly name (the last part of the ARN) of the
	// SAML provider in IAM. The combination of NameQualifier and Subject can be
	// used to uniquely identify a federated user.
	//
	// The following pseudocode shows how the hash value is calculated:
	//
	// BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
	// ) )
	NameQualifier *string `type:"string"`

	// A percentage value indicating how much of the allowed packed-policy space
	// the request's policy consumed. The service rejects any policy with a packed
	// size greater than 100 percent.
	PackedPolicySize *int64 `type:"integer"`

	// The value of the NameID element in the Subject element of the SAML assertion.
	Subject *string `type:"string"`

	// The format of the name ID, as defined by the Format attribute in the NameID
	// element of the SAML assertion (typically "transient" or "persistent").
	//
	// If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
	// that prefix is removed (e.g. ...:nameid-format:transient is returned as
	// transient); any other prefix is returned unmodified.
	SubjectType *string `type:"string"`
}
    +
    +// String returns the string representation
    +func (s AssumeRoleWithSAMLOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssumeRoleWithSAMLOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssumedRoleUser sets the AssumedRoleUser field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
    +	s.AssumedRoleUser = v
    +	return s
    +}
    +
    +// SetAudience sets the Audience field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
    +	s.Audience = &v
    +	return s
    +}
    +
    +// SetCredentials sets the Credentials field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
    +	s.Credentials = v
    +	return s
    +}
    +
    +// SetIssuer sets the Issuer field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
    +	s.Issuer = &v
    +	return s
    +}
    +
    +// SetNameQualifier sets the NameQualifier field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
    +	s.NameQualifier = &v
    +	return s
    +}
    +
    +// SetPackedPolicySize sets the PackedPolicySize field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
    +	s.PackedPolicySize = &v
    +	return s
    +}
    +
    +// SetSubject sets the Subject field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
    +	s.Subject = &v
    +	return s
    +}
    +
    +// SetSubjectType sets the SubjectType field's value.
    +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
    +	s.SubjectType = &v
    +	return s
    +}
    +
// AssumeRoleWithWebIdentityInput carries the parameters for an
// AssumeRoleWithWebIdentity call.
type AssumeRoleWithWebIdentityInput struct {
	_ struct{} `type:"structure"`

	// The duration, in seconds, of the role session: 900 (15 minutes) to 3600
	// (1 hour), default 3600.
	//
	// This is separate from the duration of a console session that might be
	// requested with the returned credentials via the federation endpoint's
	// SessionDuration parameter. See Creating a URL that Enables Federated Users
	// to Access the AWS Management Console
	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
	// in the IAM User Guide.
	DurationSeconds *int64 `min:"900" type:"integer"`

	// An optional IAM policy in JSON format. When passed, the returned temporary
	// credentials have the intersection of the assumed role's access policy and
	// this policy — it can only restrict, never expand, permissions. See
	// Permissions for AssumeRoleWithWebIdentity
	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
	// in the IAM User Guide.
	//
	// Format: up to 2048 characters from \u0020-\u00FF plus tab (\u0009),
	// linefeed (\u000A), and carriage return (\u000D).
	//
	// The plain text must be 2048 bytes or shorter; an internal conversion packs
	// it into a binary format with a separate limit. The PackedPolicySize
	// response element reports, as a percentage, how close the policy is to that
	// upper limit (100% = maximum allowed size).
	Policy *string `min:"1" type:"string"`

	// The fully qualified host component of the domain name of the identity
	// provider.
	//
	// Specify this value only for OAuth 2.0 access tokens. Currently
	// www.amazon.com and graph.facebook.com are the only supported identity
	// providers for OAuth 2.0 access tokens. Do not include URL schemes and port
	// numbers.
	//
	// Do not specify this value for OpenID Connect ID tokens.
	ProviderId *string `min:"4" type:"string"`

	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
	//
	// RoleArn is a required field
	RoleArn *string `min:"20" type:"string" required:"true"`

	// An identifier for the assumed role session, typically the name or
	// identifier of the application user, so that the resulting temporary
	// credentials are associated with that user. This session name is included
	// in the ARN and assumed role ID of the AssumedRoleUser response element.
	//
	// Format (per its regex pattern): upper- and lower-case alphanumeric
	// characters with no spaces, plus underscores or any of: =,.@-
	//
	// RoleSessionName is a required field
	RoleSessionName *string `min:"2" type:"string" required:"true"`

	// The OAuth 2.0 access token or OpenID Connect ID token provided by the
	// identity provider. The application must obtain this token by
	// authenticating its user with a web identity provider before calling
	// AssumeRoleWithWebIdentity.
	//
	// WebIdentityToken is a required field
	WebIdentityToken *string `min:"4" type:"string" required:"true"`
}
    +
    +// String returns the string representation
    +func (s AssumeRoleWithWebIdentityInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssumeRoleWithWebIdentityInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *AssumeRoleWithWebIdentityInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
    +	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
    +		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
    +	}
    +	if s.Policy != nil && len(*s.Policy) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
    +	}
    +	if s.ProviderId != nil && len(*s.ProviderId) < 4 {
    +		invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
    +	}
    +	if s.RoleArn == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
    +	}
    +	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
    +		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
    +	}
    +	if s.RoleSessionName == nil {
    +		invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
    +	}
    +	if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
    +		invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
    +	}
    +	if s.WebIdentityToken == nil {
    +		invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
    +	}
    +	if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
    +		invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDurationSeconds sets the DurationSeconds field's value.
    +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
    +	s.DurationSeconds = &v
    +	return s
    +}
    +
    +// SetPolicy sets the Policy field's value.
    +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
    +	s.Policy = &v
    +	return s
    +}
    +
    +// SetProviderId sets the ProviderId field's value.
    +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
    +	s.ProviderId = &v
    +	return s
    +}
    +
    +// SetRoleArn sets the RoleArn field's value.
    +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
    +	s.RoleArn = &v
    +	return s
    +}
    +
    +// SetRoleSessionName sets the RoleSessionName field's value.
    +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
    +	s.RoleSessionName = &v
    +	return s
    +}
    +
    +// SetWebIdentityToken sets the WebIdentityToken field's value.
    +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
    +	s.WebIdentityToken = &v
    +	return s
    +}
    +
// Contains the response to a successful AssumeRoleWithWebIdentity request,
// including temporary AWS credentials that can be used to make AWS requests.
type AssumeRoleWithWebIdentityOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) and the assumed role ID, which are
	// identifiers that you can use to refer to the resulting temporary security
	// credentials (e.g. as a principal in a resource-based policy). Both include
	// the RoleSessionName that was specified in the AssumeRole call.
	AssumedRoleUser *AssumedRoleUser `type:"structure"`

	// The intended audience (also known as client ID) of the web identity token.
	// This is traditionally the client identifier issued to the application that
	// requested the web identity token.
	Audience *string `type:"string"`

	// The temporary security credentials: access key ID, secret access key, and
	// security token.
	//
	// Note: The size of the security token that STS APIs return is not fixed.
	// Make no assumptions about the maximum size; as of this writing the typical
	// size is less than 4096 bytes, but that can vary and may grow in future.
	Credentials *Credentials `type:"structure"`

	// A percentage value indicating how much of the allowed packed-policy space
	// the request's policy consumed. The service rejects any policy with a packed
	// size greater than 100 percent.
	PackedPolicySize *int64 `type:"integer"`

	// The issuing authority of the presented web identity token: the iss field
	// for OpenID Connect ID tokens, or the ProviderId parameter passed in the
	// AssumeRoleWithWebIdentity request for OAuth 2.0 access tokens.
	Provider *string `type:"string"`

	// The unique user identifier returned by the identity provider, associated
	// with the submitted WebIdentityToken. It is typically unique to the user
	// and the application that acquired the token (pairwise identifier). For
	// OpenID Connect ID tokens this is the token's sub (Subject) claim.
	SubjectFromWebIdentityToken *string `min:"6" type:"string"`
}
    +
    +// String returns the string representation
    +func (s AssumeRoleWithWebIdentityOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssumeRoleWithWebIdentityOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAssumedRoleUser sets the AssumedRoleUser field's value.
    +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
    +	s.AssumedRoleUser = v
    +	return s
    +}
    +
    +// SetAudience sets the Audience field's value.
    +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
    +	s.Audience = &v
    +	return s
    +}
    +
    +// SetCredentials sets the Credentials field's value.
    +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
    +	s.Credentials = v
    +	return s
    +}
    +
    +// SetPackedPolicySize sets the PackedPolicySize field's value.
    +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
    +	s.PackedPolicySize = &v
    +	return s
    +}
    +
    +// SetProvider sets the Provider field's value.
    +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
    +	s.Provider = &v
    +	return s
    +}
    +
    +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
    +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
    +	s.SubjectFromWebIdentityToken = &v
    +	return s
    +}
    +
// The identifiers for the temporary security credentials that the operation
// returns.
type AssumedRoleUser struct {
	_ struct{} `type:"structure"`

	// The ARN of the temporary security credentials that are returned from the
	// AssumeRole action. For more information about ARNs and how to use them in
	// policies, see IAM Identifiers
	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
	// in Using IAM.
	//
	// Arn is a required field
	Arn *string `min:"20" type:"string" required:"true"`

	// A unique identifier that contains the role ID and the role session name of
	// the role that is being assumed. The role ID is generated by AWS when the
	// role is created.
	//
	// AssumedRoleId is a required field
	AssumedRoleId *string `min:"2" type:"string" required:"true"`
}
    +
    +// String returns the string representation
    +func (s AssumedRoleUser) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s AssumedRoleUser) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArn sets the Arn field's value.
    +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
    +	s.Arn = &v
    +	return s
    +}
    +
    +// SetAssumedRoleId sets the AssumedRoleId field's value.
    +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
    +	s.AssumedRoleId = &v
    +	return s
    +}
    +
// AWS credentials for API authentication. All fields are required: STS always
// returns the full credential triple plus its expiry.
type Credentials struct {
	_ struct{} `type:"structure"`

	// The access key ID that identifies the temporary security credentials.
	//
	// AccessKeyId is a required field
	AccessKeyId *string `min:"16" type:"string" required:"true"`

	// The date on which the current credentials expire (serialized as an ISO 8601
	// timestamp on the wire).
	//
	// Expiration is a required field
	Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`

	// The secret access key that can be used to sign requests.
	//
	// SecretAccessKey is a required field
	SecretAccessKey *string `type:"string" required:"true"`

	// The token that users must pass to the service API to use the temporary
	// credentials.
	//
	// SessionToken is a required field
	SessionToken *string `type:"string" required:"true"`
}
    +
    +// String returns the string representation
    +func (s Credentials) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s Credentials) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAccessKeyId sets the AccessKeyId field's value.
    +func (s *Credentials) SetAccessKeyId(v string) *Credentials {
    +	s.AccessKeyId = &v
    +	return s
    +}
    +
    +// SetExpiration sets the Expiration field's value.
    +func (s *Credentials) SetExpiration(v time.Time) *Credentials {
    +	s.Expiration = &v
    +	return s
    +}
    +
    +// SetSecretAccessKey sets the SecretAccessKey field's value.
    +func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
    +	s.SecretAccessKey = &v
    +	return s
    +}
    +
    +// SetSessionToken sets the SessionToken field's value.
    +func (s *Credentials) SetSessionToken(v string) *Credentials {
    +	s.SessionToken = &v
    +	return s
    +}
    +
// DecodeAuthorizationMessageInput carries the parameters for a
// DecodeAuthorizationMessage call.
type DecodeAuthorizationMessageInput struct {
	_ struct{} `type:"structure"`

	// The encoded message that was returned with the response.
	//
	// EncodedMessage is a required field
	EncodedMessage *string `min:"1" type:"string" required:"true"`
}
    +
    +// String returns the string representation
    +func (s DecodeAuthorizationMessageInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DecodeAuthorizationMessageInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *DecodeAuthorizationMessageInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
    +	if s.EncodedMessage == nil {
    +		invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
    +	}
    +	if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetEncodedMessage sets the EncodedMessage field's value.
    +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
    +	s.EncodedMessage = &v
    +	return s
    +}
    +
    +// A document that contains additional information about the authorization status
    +// of a request from an encoded message that is returned in response to an AWS
    +// request.
    +type DecodeAuthorizationMessageOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// An XML document that contains the decoded message.
    +	DecodedMessage *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s DecodeAuthorizationMessageOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s DecodeAuthorizationMessageOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetDecodedMessage sets the DecodedMessage field's value.
    +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
    +	s.DecodedMessage = &v
    +	return s
    +}
    +
    +// Identifiers for the federated user that is associated with the credentials.
    +type FederatedUser struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The ARN that specifies the federated user that is associated with the credentials.
    +	// For more information about ARNs and how to use them in policies, see IAM
    +	// Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
    +	// in Using IAM.
    +	//
    +	// Arn is a required field
    +	Arn *string `min:"20" type:"string" required:"true"`
    +
    +	// The string that identifies the federated user associated with the credentials,
    +	// similar to the unique ID of an IAM user.
    +	//
    +	// FederatedUserId is a required field
    +	FederatedUserId *string `min:"2" type:"string" required:"true"`
    +}
    +
    +// String returns the string representation
    +func (s FederatedUser) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s FederatedUser) GoString() string {
    +	return s.String()
    +}
    +
    +// SetArn sets the Arn field's value.
    +func (s *FederatedUser) SetArn(v string) *FederatedUser {
    +	s.Arn = &v
    +	return s
    +}
    +
    +// SetFederatedUserId sets the FederatedUserId field's value.
    +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
    +	s.FederatedUserId = &v
    +	return s
    +}
    +
    +type GetCallerIdentityInput struct {
    +	_ struct{} `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s GetCallerIdentityInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetCallerIdentityInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Contains the response to a successful GetCallerIdentity request, including
    +// information about the entity making the request.
    +type GetCallerIdentityOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The AWS account ID number of the account that owns or contains the calling
    +	// entity.
    +	Account *string `type:"string"`
    +
    +	// The AWS ARN associated with the calling entity.
    +	Arn *string `min:"20" type:"string"`
    +
    +	// The unique identifier of the calling entity. The exact value depends on the
    +	// type of entity making the call. The values returned are those listed in the
    +	// aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
    +	// found on the Policy Variables reference page in the IAM User Guide.
    +	UserId *string `type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s GetCallerIdentityOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetCallerIdentityOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetAccount sets the Account field's value.
    +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
    +	s.Account = &v
    +	return s
    +}
    +
    +// SetArn sets the Arn field's value.
    +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
    +	s.Arn = &v
    +	return s
    +}
    +
    +// SetUserId sets the UserId field's value.
    +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
    +	s.UserId = &v
    +	return s
    +}
    +
    +type GetFederationTokenInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The duration, in seconds, that the session should last. Acceptable durations
    +	// for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
    +	// (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
    +	// using AWS account (root) credentials are restricted to a maximum of 3600
    +	// seconds (one hour). If the specified duration is longer than one hour, the
    +	// session obtained by using AWS account (root) credentials defaults to one
    +	// hour.
    +	DurationSeconds *int64 `min:"900" type:"integer"`
    +
    +	// The name of the federated user. The name is used as an identifier for the
    +	// temporary security credentials (such as Bob). For example, you can reference
    +	// the federated user name in a resource-based policy, such as in an Amazon
    +	// S3 bucket policy.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a string
    +	// of characters consisting of upper- and lower-case alphanumeric characters
    +	// with no spaces. You can also include underscores or any of the following
    +	// characters: =,.@-
    +	//
    +	// Name is a required field
    +	Name *string `min:"2" type:"string" required:"true"`
    +
    +	// An IAM policy in JSON format that is passed with the GetFederationToken call
    +	// and evaluated along with the policy or policies that are attached to the
    +	// IAM user whose credentials are used to call GetFederationToken. The passed
    +	// policy is used to scope down the permissions that are available to the IAM
    +	// user, by allowing only a subset of the permissions that are granted to the
    +	// IAM user. The passed policy cannot grant more permissions than those granted
    +	// to the IAM user. The final permissions for the federated user are the most
    +	// restrictive set based on the intersection of the passed policy and the IAM
    +	// user policy.
    +	//
    +	// If you do not pass a policy, the resulting temporary security credentials
    +	// have no effective permissions. The only exception is when the temporary security
    +	// credentials are used to access a resource that has a resource-based policy
    +	// that specifically allows the federated user to access the resource.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a string
    +	// of characters up to 2048 characters in length. The characters can be any
    +	// ASCII character from the space character to the end of the valid character
    +	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
    +	// and carriage return (\u000D) characters.
    +	//
    +	// The policy plain text must be 2048 bytes or shorter. However, an internal
    +	// conversion compresses it into a packed binary format with a separate limit.
    +	// The PackedPolicySize response element indicates by percentage how close to
    +	// the upper size limit the policy is, with 100% equaling the maximum allowed
    +	// size.
    +	//
    +	// For more information about how permissions work, see Permissions for GetFederationToken
    +	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
    +	Policy *string `min:"1" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s GetFederationTokenInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetFederationTokenInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *GetFederationTokenInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
    +	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
    +		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
    +	}
    +	if s.Name == nil {
    +		invalidParams.Add(request.NewErrParamRequired("Name"))
    +	}
    +	if s.Name != nil && len(*s.Name) < 2 {
    +		invalidParams.Add(request.NewErrParamMinLen("Name", 2))
    +	}
    +	if s.Policy != nil && len(*s.Policy) < 1 {
    +		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDurationSeconds sets the DurationSeconds field's value.
    +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
    +	s.DurationSeconds = &v
    +	return s
    +}
    +
    +// SetName sets the Name field's value.
    +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
    +	s.Name = &v
    +	return s
    +}
    +
    +// SetPolicy sets the Policy field's value.
    +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
    +	s.Policy = &v
    +	return s
    +}
    +
    +// Contains the response to a successful GetFederationToken request, including
    +// temporary AWS credentials that can be used to make AWS requests.
    +type GetFederationTokenOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The temporary security credentials, which include an access key ID, a secret
    +	// access key, and a security (or session) token.
    +	//
    +	// Note: The size of the security token that STS APIs return is not fixed. We
    +	// strongly recommend that you make no assumptions about the maximum size. As
    +	// of this writing, the typical size is less than 4096 bytes, but that can vary.
    +	// Also, future updates to AWS might require larger sizes.
    +	Credentials *Credentials `type:"structure"`
    +
    +	// Identifiers for the federated user associated with the credentials (such
    +	// as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
    +	// can use the federated user's ARN in your resource-based policies, such as
    +	// an Amazon S3 bucket policy.
    +	FederatedUser *FederatedUser `type:"structure"`
    +
    +	// A percentage value indicating the size of the policy in packed form. The
    +	// service rejects policies for which the packed size is greater than 100 percent
    +	// of the allowed value.
    +	PackedPolicySize *int64 `type:"integer"`
    +}
    +
    +// String returns the string representation
    +func (s GetFederationTokenOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetFederationTokenOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCredentials sets the Credentials field's value.
    +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
    +	s.Credentials = v
    +	return s
    +}
    +
    +// SetFederatedUser sets the FederatedUser field's value.
    +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
    +	s.FederatedUser = v
    +	return s
    +}
    +
    +// SetPackedPolicySize sets the PackedPolicySize field's value.
    +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
    +	s.PackedPolicySize = &v
    +	return s
    +}
    +
    +type GetSessionTokenInput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The duration, in seconds, that the credentials should remain valid. Acceptable
    +	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
    +	// seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
    +	// for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
    +	// If the duration is longer than one hour, the session for AWS account owners
    +	// defaults to one hour.
    +	DurationSeconds *int64 `min:"900" type:"integer"`
    +
    +	// The identification number of the MFA device that is associated with the IAM
    +	// user who is making the GetSessionToken call. Specify this value if the IAM
    +	// user has a policy that requires MFA authentication. The value is either the
    +	// serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
    +	// Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
    +	// You can find the device for an IAM user by going to the AWS Management Console
    +	// and viewing the user's security credentials.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a string
    +	// of characters consisting of upper- and lower-case alphanumeric characters
    +	// with no spaces. You can also include underscores or any of the following
    +	// characters: =,.@-
    +	SerialNumber *string `min:"9" type:"string"`
    +
    +	// The value provided by the MFA device, if MFA is required. If any policy requires
    +	// the IAM user to submit an MFA code, specify this value. If MFA authentication
    +	// is required, and the user does not provide a code when requesting a set of
    +	// temporary security credentials, the user will receive an "access denied"
    +	// response when requesting resources that require MFA authentication.
    +	//
    +	// The format for this parameter, as described by its regex pattern, is a sequence
    +	// of six numeric digits.
    +	TokenCode *string `min:"6" type:"string"`
    +}
    +
    +// String returns the string representation
    +func (s GetSessionTokenInput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetSessionTokenInput) GoString() string {
    +	return s.String()
    +}
    +
    +// Validate inspects the fields of the type to determine if they are valid.
    +func (s *GetSessionTokenInput) Validate() error {
    +	invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
    +	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
    +		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
    +	}
    +	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
    +		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
    +	}
    +	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
    +		invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
    +	}
    +
    +	if invalidParams.Len() > 0 {
    +		return invalidParams
    +	}
    +	return nil
    +}
    +
    +// SetDurationSeconds sets the DurationSeconds field's value.
    +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
    +	s.DurationSeconds = &v
    +	return s
    +}
    +
    +// SetSerialNumber sets the SerialNumber field's value.
    +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
    +	s.SerialNumber = &v
    +	return s
    +}
    +
    +// SetTokenCode sets the TokenCode field's value.
    +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
    +	s.TokenCode = &v
    +	return s
    +}
    +
    +// Contains the response to a successful GetSessionToken request, including
    +// temporary AWS credentials that can be used to make AWS requests.
    +type GetSessionTokenOutput struct {
    +	_ struct{} `type:"structure"`
    +
    +	// The temporary security credentials, which include an access key ID, a secret
    +	// access key, and a security (or session) token.
    +	//
    +	// Note: The size of the security token that STS APIs return is not fixed. We
    +	// strongly recommend that you make no assumptions about the maximum size. As
    +	// of this writing, the typical size is less than 4096 bytes, but that can vary.
    +	// Also, future updates to AWS might require larger sizes.
    +	Credentials *Credentials `type:"structure"`
    +}
    +
    +// String returns the string representation
    +func (s GetSessionTokenOutput) String() string {
    +	return awsutil.Prettify(s)
    +}
    +
    +// GoString returns the string representation
    +func (s GetSessionTokenOutput) GoString() string {
    +	return s.String()
    +}
    +
    +// SetCredentials sets the Credentials field's value.
    +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
    +	s.Credentials = v
    +	return s
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
    new file mode 100644
    index 0000000..4010cc7
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
    @@ -0,0 +1,12 @@
    +package sts
    +
    +import "github.com/aws/aws-sdk-go/aws/request"
    +
    +func init() {
    +	initRequest = func(r *request.Request) {
    +		switch r.Operation.Name {
    +		case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
    +			r.Handlers.Sign.Clear() // these operations are unsigned
    +		}
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
    new file mode 100644
    index 0000000..a9b9b32
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
    @@ -0,0 +1,130 @@
    +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
    +
    +package sts
    +
    +import (
    +	"github.com/aws/aws-sdk-go/aws"
    +	"github.com/aws/aws-sdk-go/aws/client"
    +	"github.com/aws/aws-sdk-go/aws/client/metadata"
    +	"github.com/aws/aws-sdk-go/aws/request"
    +	"github.com/aws/aws-sdk-go/aws/signer/v4"
    +	"github.com/aws/aws-sdk-go/private/protocol/query"
    +)
    +
    +// The AWS Security Token Service (STS) is a web service that enables you to
    +// request temporary, limited-privilege credentials for AWS Identity and Access
    +// Management (IAM) users or for users that you authenticate (federated users).
    +// This guide provides descriptions of the STS API. For more detailed information
    +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
    +//
    +// As an alternative to using the API, you can use one of the AWS SDKs, which
    +// consist of libraries and sample code for various programming languages and
    +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
    +// way to create programmatic access to STS. For example, the SDKs take care
    +// of cryptographically signing requests, managing errors, and retrying requests
    +// automatically. For information about the AWS SDKs, including how to download
    +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
    +//
    +// For information about setting up signatures and authorization through the
    +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
    +// in the AWS General Reference. For general information about the Query API,
    +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
    +// in Using IAM. For information about using security tokens with other AWS
    +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
    +// in the IAM User Guide.
    +//
    +// If you're new to AWS and need additional technical information about a specific
    +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
    +// (http://aws.amazon.com/documentation/).
    +//
    +// Endpoints
    +//
    +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
    +// that maps to the US East (N. Virginia) region. Additional regions are available
    +// and are activated by default. For more information, see Activating and Deactivating
    +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
    +// in the IAM User Guide.
    +//
    +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
    +// in the AWS General Reference.
    +//
    +// Recording API requests
    +//
    +// STS supports AWS CloudTrail, which is a service that records AWS calls for
    +// your AWS account and delivers log files to an Amazon S3 bucket. By using
    +// information collected by CloudTrail, you can determine what requests were
    +// successfully made to STS, who made the request, when it was made, and so
    +// on. To learn more about CloudTrail, including how to turn it on and find
    +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
    +//The service client's operations are safe to be used concurrently.
    +// It is not safe to mutate any of the client's properties though.
    +type STS struct {
    +	*client.Client
    +}
    +
    +// Used for custom client initialization logic
    +var initClient func(*client.Client)
    +
    +// Used for custom request initialization logic
    +var initRequest func(*request.Request)
    +
    +// A ServiceName is the name of the service the client will make API calls to.
    +const ServiceName = "sts"
    +
    +// New creates a new instance of the STS client with a session.
    +// If additional configuration is needed for the client instance use the optional
    +// aws.Config parameter to add your extra config.
    +//
    +// Example:
    +//     // Create a STS client from just a session.
    +//     svc := sts.New(mySession)
    +//
    +//     // Create a STS client with additional configuration
    +//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
    +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
    +	c := p.ClientConfig(ServiceName, cfgs...)
    +	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
    +}
    +
    +// newClient creates, initializes and returns a new service client instance.
    +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
    +	svc := &STS{
    +		Client: client.New(
    +			cfg,
    +			metadata.ClientInfo{
    +				ServiceName:   ServiceName,
    +				SigningRegion: signingRegion,
    +				Endpoint:      endpoint,
    +				APIVersion:    "2011-06-15",
    +			},
    +			handlers,
    +		),
    +	}
    +
    +	// Handlers
    +	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
    +	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
    +	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
    +	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
    +	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
    +
    +	// Run custom client initialization if present
    +	if initClient != nil {
    +		initClient(svc.Client)
    +	}
    +
    +	return svc
    +}
    +
    +// newRequest creates a new request for a STS operation and runs any
    +// custom request initialization.
    +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
    +	req := c.NewRequest(op, params, data)
    +
    +	// Run custom request initialization if present
    +	if initRequest != nil {
    +		initRequest(req)
    +	}
    +
    +	return req
    +}
    diff --git a/src/prometheus/vendor/github.com/beorn7/perks/LICENSE b/src/prometheus/vendor/github.com/beorn7/perks/LICENSE
    new file mode 100644
    index 0000000..339177b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/beorn7/perks/LICENSE
    @@ -0,0 +1,20 @@
    +Copyright (C) 2013 Blake Mizerany
    +
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of this software and associated documentation files (the
    +"Software"), to deal in the Software without restriction, including
    +without limitation the rights to use, copy, modify, merge, publish,
    +distribute, sublicense, and/or sell copies of the Software, and to
    +permit persons to whom the Software is furnished to do so, subject to
    +the following conditions:
    +
    +The above copyright notice and this permission notice shall be
    +included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    diff --git a/src/prometheus/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/src/prometheus/vendor/github.com/beorn7/perks/quantile/exampledata.txt
    new file mode 100644
    index 0000000..1602287
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/beorn7/perks/quantile/exampledata.txt
    @@ -0,0 +1,2388 @@
    +8
    +5
    +26
    +12
    +5
    +235
    +13
    +6
    +28
    +30
    +3
    +3
    +3
    +3
    +5
    +2
    +33
    +7
    +2
    +4
    +7
    +12
    +14
    +5
    +8
    +3
    +10
    +4
    +5
    +3
    +6
    +6
    +209
    +20
    +3
    +10
    +14
    +3
    +4
    +6
    +8
    +5
    +11
    +7
    +3
    +2
    +3
    +3
    +212
    +5
    +222
    +4
    +10
    +10
    +5
    +6
    +3
    +8
    +3
    +10
    +254
    +220
    +2
    +3
    +5
    +24
    +5
    +4
    +222
    +7
    +3
    +3
    +223
    +8
    +15
    +12
    +14
    +14
    +3
    +2
    +2
    +3
    +13
    +3
    +11
    +4
    +4
    +6
    +5
    +7
    +13
    +5
    +3
    +5
    +2
    +5
    +3
    +5
    +2
    +7
    +15
    +17
    +14
    +3
    +6
    +6
    +3
    +17
    +5
    +4
    +7
    +6
    +4
    +4
    +8
    +6
    +8
    +3
    +9
    +3
    +6
    +3
    +4
    +5
    +3
    +3
    +660
    +4
    +6
    +10
    +3
    +6
    +3
    +2
    +5
    +13
    +2
    +4
    +4
    +10
    +4
    +8
    +4
    +3
    +7
    +9
    +9
    +3
    +10
    +37
    +3
    +13
    +4
    +12
    +3
    +6
    +10
    +8
    +5
    +21
    +2
    +3
    +8
    +3
    +2
    +3
    +3
    +4
    +12
    +2
    +4
    +8
    +8
    +4
    +3
    +2
    +20
    +1
    +6
    +32
    +2
    +11
    +6
    +18
    +3
    +8
    +11
    +3
    +212
    +3
    +4
    +2
    +6
    +7
    +12
    +11
    +3
    +2
    +16
    +10
    +6
    +4
    +6
    +3
    +2
    +7
    +3
    +2
    +2
    +2
    +2
    +5
    +6
    +4
    +3
    +10
    +3
    +4
    +6
    +5
    +3
    +4
    +4
    +5
    +6
    +4
    +3
    +4
    +4
    +5
    +7
    +5
    +5
    +3
    +2
    +7
    +2
    +4
    +12
    +4
    +5
    +6
    +2
    +4
    +4
    +8
    +4
    +15
    +13
    +7
    +16
    +5
    +3
    +23
    +5
    +5
    +7
    +3
    +2
    +9
    +8
    +7
    +5
    +8
    +11
    +4
    +10
    +76
    +4
    +47
    +4
    +3
    +2
    +7
    +4
    +2
    +3
    +37
    +10
    +4
    +2
    +20
    +5
    +4
    +4
    +10
    +10
    +4
    +3
    +7
    +23
    +240
    +7
    +13
    +5
    +5
    +3
    +3
    +2
    +5
    +4
    +2
    +8
    +7
    +19
    +2
    +23
    +8
    +7
    +2
    +5
    +3
    +8
    +3
    +8
    +13
    +5
    +5
    +5
    +2
    +3
    +23
    +4
    +9
    +8
    +4
    +3
    +3
    +5
    +220
    +2
    +3
    +4
    +6
    +14
    +3
    +53
    +6
    +2
    +5
    +18
    +6
    +3
    +219
    +6
    +5
    +2
    +5
    +3
    +6
    +5
    +15
    +4
    +3
    +17
    +3
    +2
    +4
    +7
    +2
    +3
    +3
    +4
    +4
    +3
    +2
    +664
    +6
    +3
    +23
    +5
    +5
    +16
    +5
    +8
    +2
    +4
    +2
    +24
    +12
    +3
    +2
    +3
    +5
    +8
    +3
    +5
    +4
    +3
    +14
    +3
    +5
    +8
    +2
    +3
    +7
    +9
    +4
    +2
    +3
    +6
    +8
    +4
    +3
    +4
    +6
    +5
    +3
    +3
    +6
    +3
    +19
    +4
    +4
    +6
    +3
    +6
    +3
    +5
    +22
    +5
    +4
    +4
    +3
    +8
    +11
    +4
    +9
    +7
    +6
    +13
    +4
    +4
    +4
    +6
    +17
    +9
    +3
    +3
    +3
    +4
    +3
    +221
    +5
    +11
    +3
    +4
    +2
    +12
    +6
    +3
    +5
    +7
    +5
    +7
    +4
    +9
    +7
    +14
    +37
    +19
    +217
    +16
    +3
    +5
    +2
    +2
    +7
    +19
    +7
    +6
    +7
    +4
    +24
    +5
    +11
    +4
    +7
    +7
    +9
    +13
    +3
    +4
    +3
    +6
    +28
    +4
    +4
    +5
    +5
    +2
    +5
    +6
    +4
    +4
    +6
    +10
    +5
    +4
    +3
    +2
    +3
    +3
    +6
    +5
    +5
    +4
    +3
    +2
    +3
    +7
    +4
    +6
    +18
    +16
    +8
    +16
    +4
    +5
    +8
    +6
    +9
    +13
    +1545
    +6
    +215
    +6
    +5
    +6
    +3
    +45
    +31
    +5
    +2
    +2
    +4
    +3
    +3
    +2
    +5
    +4
    +3
    +5
    +7
    +7
    +4
    +5
    +8
    +5
    +4
    +749
    +2
    +31
    +9
    +11
    +2
    +11
    +5
    +4
    +4
    +7
    +9
    +11
    +4
    +5
    +4
    +7
    +3
    +4
    +6
    +2
    +15
    +3
    +4
    +3
    +4
    +3
    +5
    +2
    +13
    +5
    +5
    +3
    +3
    +23
    +4
    +4
    +5
    +7
    +4
    +13
    +2
    +4
    +3
    +4
    +2
    +6
    +2
    +7
    +3
    +5
    +5
    +3
    +29
    +5
    +4
    +4
    +3
    +10
    +2
    +3
    +79
    +16
    +6
    +6
    +7
    +7
    +3
    +5
    +5
    +7
    +4
    +3
    +7
    +9
    +5
    +6
    +5
    +9
    +6
    +3
    +6
    +4
    +17
    +2
    +10
    +9
    +3
    +6
    +2
    +3
    +21
    +22
    +5
    +11
    +4
    +2
    +17
    +2
    +224
    +2
    +14
    +3
    +4
    +4
    +2
    +4
    +4
    +4
    +4
    +5
    +3
    +4
    +4
    +10
    +2
    +6
    +3
    +3
    +5
    +7
    +2
    +7
    +5
    +6
    +3
    +218
    +2
    +2
    +5
    +2
    +6
    +3
    +5
    +222
    +14
    +6
    +33
    +3
    +2
    +5
    +3
    +3
    +3
    +9
    +5
    +3
    +3
    +2
    +7
    +4
    +3
    +4
    +3
    +5
    +6
    +5
    +26
    +4
    +13
    +9
    +7
    +3
    +221
    +3
    +3
    +4
    +4
    +4
    +4
    +2
    +18
    +5
    +3
    +7
    +9
    +6
    +8
    +3
    +10
    +3
    +11
    +9
    +5
    +4
    +17
    +5
    +5
    +6
    +6
    +3
    +2
    +4
    +12
    +17
    +6
    +7
    +218
    +4
    +2
    +4
    +10
    +3
    +5
    +15
    +3
    +9
    +4
    +3
    +3
    +6
    +29
    +3
    +3
    +4
    +5
    +5
    +3
    +8
    +5
    +6
    +6
    +7
    +5
    +3
    +5
    +3
    +29
    +2
    +31
    +5
    +15
    +24
    +16
    +5
    +207
    +4
    +3
    +3
    +2
    +15
    +4
    +4
    +13
    +5
    +5
    +4
    +6
    +10
    +2
    +7
    +8
    +4
    +6
    +20
    +5
    +3
    +4
    +3
    +12
    +12
    +5
    +17
    +7
    +3
    +3
    +3
    +6
    +10
    +3
    +5
    +25
    +80
    +4
    +9
    +3
    +2
    +11
    +3
    +3
    +2
    +3
    +8
    +7
    +5
    +5
    +19
    +5
    +3
    +3
    +12
    +11
    +2
    +6
    +5
    +5
    +5
    +3
    +3
    +3
    +4
    +209
    +14
    +3
    +2
    +5
    +19
    +4
    +4
    +3
    +4
    +14
    +5
    +6
    +4
    +13
    +9
    +7
    +4
    +7
    +10
    +2
    +9
    +5
    +7
    +2
    +8
    +4
    +6
    +5
    +5
    +222
    +8
    +7
    +12
    +5
    +216
    +3
    +4
    +4
    +6
    +3
    +14
    +8
    +7
    +13
    +4
    +3
    +3
    +3
    +3
    +17
    +5
    +4
    +3
    +33
    +6
    +6
    +33
    +7
    +5
    +3
    +8
    +7
    +5
    +2
    +9
    +4
    +2
    +233
    +24
    +7
    +4
    +8
    +10
    +3
    +4
    +15
    +2
    +16
    +3
    +3
    +13
    +12
    +7
    +5
    +4
    +207
    +4
    +2
    +4
    +27
    +15
    +2
    +5
    +2
    +25
    +6
    +5
    +5
    +6
    +13
    +6
    +18
    +6
    +4
    +12
    +225
    +10
    +7
    +5
    +2
    +2
    +11
    +4
    +14
    +21
    +8
    +10
    +3
    +5
    +4
    +232
    +2
    +5
    +5
    +3
    +7
    +17
    +11
    +6
    +6
    +23
    +4
    +6
    +3
    +5
    +4
    +2
    +17
    +3
    +6
    +5
    +8
    +3
    +2
    +2
    +14
    +9
    +4
    +4
    +2
    +5
    +5
    +3
    +7
    +6
    +12
    +6
    +10
    +3
    +6
    +2
    +2
    +19
    +5
    +4
    +4
    +9
    +2
    +4
    +13
    +3
    +5
    +6
    +3
    +6
    +5
    +4
    +9
    +6
    +3
    +5
    +7
    +3
    +6
    +6
    +4
    +3
    +10
    +6
    +3
    +221
    +3
    +5
    +3
    +6
    +4
    +8
    +5
    +3
    +6
    +4
    +4
    +2
    +54
    +5
    +6
    +11
    +3
    +3
    +4
    +4
    +4
    +3
    +7
    +3
    +11
    +11
    +7
    +10
    +6
    +13
    +223
    +213
    +15
    +231
    +7
    +3
    +7
    +228
    +2
    +3
    +4
    +4
    +5
    +6
    +7
    +4
    +13
    +3
    +4
    +5
    +3
    +6
    +4
    +6
    +7
    +2
    +4
    +3
    +4
    +3
    +3
    +6
    +3
    +7
    +3
    +5
    +18
    +5
    +6
    +8
    +10
    +3
    +3
    +3
    +2
    +4
    +2
    +4
    +4
    +5
    +6
    +6
    +4
    +10
    +13
    +3
    +12
    +5
    +12
    +16
    +8
    +4
    +19
    +11
    +2
    +4
    +5
    +6
    +8
    +5
    +6
    +4
    +18
    +10
    +4
    +2
    +216
    +6
    +6
    +6
    +2
    +4
    +12
    +8
    +3
    +11
    +5
    +6
    +14
    +5
    +3
    +13
    +4
    +5
    +4
    +5
    +3
    +28
    +6
    +3
    +7
    +219
    +3
    +9
    +7
    +3
    +10
    +6
    +3
    +4
    +19
    +5
    +7
    +11
    +6
    +15
    +19
    +4
    +13
    +11
    +3
    +7
    +5
    +10
    +2
    +8
    +11
    +2
    +6
    +4
    +6
    +24
    +6
    +3
    +3
    +3
    +3
    +6
    +18
    +4
    +11
    +4
    +2
    +5
    +10
    +8
    +3
    +9
    +5
    +3
    +4
    +5
    +6
    +2
    +5
    +7
    +4
    +4
    +14
    +6
    +4
    +4
    +5
    +5
    +7
    +2
    +4
    +3
    +7
    +3
    +3
    +6
    +4
    +5
    +4
    +4
    +4
    +3
    +3
    +3
    +3
    +8
    +14
    +2
    +3
    +5
    +3
    +2
    +4
    +5
    +3
    +7
    +3
    +3
    +18
    +3
    +4
    +4
    +5
    +7
    +3
    +3
    +3
    +13
    +5
    +4
    +8
    +211
    +5
    +5
    +3
    +5
    +2
    +5
    +4
    +2
    +655
    +6
    +3
    +5
    +11
    +2
    +5
    +3
    +12
    +9
    +15
    +11
    +5
    +12
    +217
    +2
    +6
    +17
    +3
    +3
    +207
    +5
    +5
    +4
    +5
    +9
    +3
    +2
    +8
    +5
    +4
    +3
    +2
    +5
    +12
    +4
    +14
    +5
    +4
    +2
    +13
    +5
    +8
    +4
    +225
    +4
    +3
    +4
    +5
    +4
    +3
    +3
    +6
    +23
    +9
    +2
    +6
    +7
    +233
    +4
    +4
    +6
    +18
    +3
    +4
    +6
    +3
    +4
    +4
    +2
    +3
    +7
    +4
    +13
    +227
    +4
    +3
    +5
    +4
    +2
    +12
    +9
    +17
    +3
    +7
    +14
    +6
    +4
    +5
    +21
    +4
    +8
    +9
    +2
    +9
    +25
    +16
    +3
    +6
    +4
    +7
    +8
    +5
    +2
    +3
    +5
    +4
    +3
    +3
    +5
    +3
    +3
    +3
    +2
    +3
    +19
    +2
    +4
    +3
    +4
    +2
    +3
    +4
    +4
    +2
    +4
    +3
    +3
    +3
    +2
    +6
    +3
    +17
    +5
    +6
    +4
    +3
    +13
    +5
    +3
    +3
    +3
    +4
    +9
    +4
    +2
    +14
    +12
    +4
    +5
    +24
    +4
    +3
    +37
    +12
    +11
    +21
    +3
    +4
    +3
    +13
    +4
    +2
    +3
    +15
    +4
    +11
    +4
    +4
    +3
    +8
    +3
    +4
    +4
    +12
    +8
    +5
    +3
    +3
    +4
    +2
    +220
    +3
    +5
    +223
    +3
    +3
    +3
    +10
    +3
    +15
    +4
    +241
    +9
    +7
    +3
    +6
    +6
    +23
    +4
    +13
    +7
    +3
    +4
    +7
    +4
    +9
    +3
    +3
    +4
    +10
    +5
    +5
    +1
    +5
    +24
    +2
    +4
    +5
    +5
    +6
    +14
    +3
    +8
    +2
    +3
    +5
    +13
    +13
    +3
    +5
    +2
    +3
    +15
    +3
    +4
    +2
    +10
    +4
    +4
    +4
    +5
    +5
    +3
    +5
    +3
    +4
    +7
    +4
    +27
    +3
    +6
    +4
    +15
    +3
    +5
    +6
    +6
    +5
    +4
    +8
    +3
    +9
    +2
    +6
    +3
    +4
    +3
    +7
    +4
    +18
    +3
    +11
    +3
    +3
    +8
    +9
    +7
    +24
    +3
    +219
    +7
    +10
    +4
    +5
    +9
    +12
    +2
    +5
    +4
    +4
    +4
    +3
    +3
    +19
    +5
    +8
    +16
    +8
    +6
    +22
    +3
    +23
    +3
    +242
    +9
    +4
    +3
    +3
    +5
    +7
    +3
    +3
    +5
    +8
    +3
    +7
    +5
    +14
    +8
    +10
    +3
    +4
    +3
    +7
    +4
    +6
    +7
    +4
    +10
    +4
    +3
    +11
    +3
    +7
    +10
    +3
    +13
    +6
    +8
    +12
    +10
    +5
    +7
    +9
    +3
    +4
    +7
    +7
    +10
    +8
    +30
    +9
    +19
    +4
    +3
    +19
    +15
    +4
    +13
    +3
    +215
    +223
    +4
    +7
    +4
    +8
    +17
    +16
    +3
    +7
    +6
    +5
    +5
    +4
    +12
    +3
    +7
    +4
    +4
    +13
    +4
    +5
    +2
    +5
    +6
    +5
    +6
    +6
    +7
    +10
    +18
    +23
    +9
    +3
    +3
    +6
    +5
    +2
    +4
    +2
    +7
    +3
    +3
    +2
    +5
    +5
    +14
    +10
    +224
    +6
    +3
    +4
    +3
    +7
    +5
    +9
    +3
    +6
    +4
    +2
    +5
    +11
    +4
    +3
    +3
    +2
    +8
    +4
    +7
    +4
    +10
    +7
    +3
    +3
    +18
    +18
    +17
    +3
    +3
    +3
    +4
    +5
    +3
    +3
    +4
    +12
    +7
    +3
    +11
    +13
    +5
    +4
    +7
    +13
    +5
    +4
    +11
    +3
    +12
    +3
    +6
    +4
    +4
    +21
    +4
    +6
    +9
    +5
    +3
    +10
    +8
    +4
    +6
    +4
    +4
    +6
    +5
    +4
    +8
    +6
    +4
    +6
    +4
    +4
    +5
    +9
    +6
    +3
    +4
    +2
    +9
    +3
    +18
    +2
    +4
    +3
    +13
    +3
    +6
    +6
    +8
    +7
    +9
    +3
    +2
    +16
    +3
    +4
    +6
    +3
    +2
    +33
    +22
    +14
    +4
    +9
    +12
    +4
    +5
    +6
    +3
    +23
    +9
    +4
    +3
    +5
    +5
    +3
    +4
    +5
    +3
    +5
    +3
    +10
    +4
    +5
    +5
    +8
    +4
    +4
    +6
    +8
    +5
    +4
    +3
    +4
    +6
    +3
    +3
    +3
    +5
    +9
    +12
    +6
    +5
    +9
    +3
    +5
    +3
    +2
    +2
    +2
    +18
    +3
    +2
    +21
    +2
    +5
    +4
    +6
    +4
    +5
    +10
    +3
    +9
    +3
    +2
    +10
    +7
    +3
    +6
    +6
    +4
    +4
    +8
    +12
    +7
    +3
    +7
    +3
    +3
    +9
    +3
    +4
    +5
    +4
    +4
    +5
    +5
    +10
    +15
    +4
    +4
    +14
    +6
    +227
    +3
    +14
    +5
    +216
    +22
    +5
    +4
    +2
    +2
    +6
    +3
    +4
    +2
    +9
    +9
    +4
    +3
    +28
    +13
    +11
    +4
    +5
    +3
    +3
    +2
    +3
    +3
    +5
    +3
    +4
    +3
    +5
    +23
    +26
    +3
    +4
    +5
    +6
    +4
    +6
    +3
    +5
    +5
    +3
    +4
    +3
    +2
    +2
    +2
    +7
    +14
    +3
    +6
    +7
    +17
    +2
    +2
    +15
    +14
    +16
    +4
    +6
    +7
    +13
    +6
    +4
    +5
    +6
    +16
    +3
    +3
    +28
    +3
    +6
    +15
    +3
    +9
    +2
    +4
    +6
    +3
    +3
    +22
    +4
    +12
    +6
    +7
    +2
    +5
    +4
    +10
    +3
    +16
    +6
    +9
    +2
    +5
    +12
    +7
    +5
    +5
    +5
    +5
    +2
    +11
    +9
    +17
    +4
    +3
    +11
    +7
    +3
    +5
    +15
    +4
    +3
    +4
    +211
    +8
    +7
    +5
    +4
    +7
    +6
    +7
    +6
    +3
    +6
    +5
    +6
    +5
    +3
    +4
    +4
    +26
    +4
    +6
    +10
    +4
    +4
    +3
    +2
    +3
    +3
    +4
    +5
    +9
    +3
    +9
    +4
    +4
    +5
    +5
    +8
    +2
    +4
    +2
    +3
    +8
    +4
    +11
    +19
    +5
    +8
    +6
    +3
    +5
    +6
    +12
    +3
    +2
    +4
    +16
    +12
    +3
    +4
    +4
    +8
    +6
    +5
    +6
    +6
    +219
    +8
    +222
    +6
    +16
    +3
    +13
    +19
    +5
    +4
    +3
    +11
    +6
    +10
    +4
    +7
    +7
    +12
    +5
    +3
    +3
    +5
    +6
    +10
    +3
    +8
    +2
    +5
    +4
    +7
    +2
    +4
    +4
    +2
    +12
    +9
    +6
    +4
    +2
    +40
    +2
    +4
    +10
    +4
    +223
    +4
    +2
    +20
    +6
    +7
    +24
    +5
    +4
    +5
    +2
    +20
    +16
    +6
    +5
    +13
    +2
    +3
    +3
    +19
    +3
    +2
    +4
    +5
    +6
    +7
    +11
    +12
    +5
    +6
    +7
    +7
    +3
    +5
    +3
    +5
    +3
    +14
    +3
    +4
    +4
    +2
    +11
    +1
    +7
    +3
    +9
    +6
    +11
    +12
    +5
    +8
    +6
    +221
    +4
    +2
    +12
    +4
    +3
    +15
    +4
    +5
    +226
    +7
    +218
    +7
    +5
    +4
    +5
    +18
    +4
    +5
    +9
    +4
    +4
    +2
    +9
    +18
    +18
    +9
    +5
    +6
    +6
    +3
    +3
    +7
    +3
    +5
    +4
    +4
    +4
    +12
    +3
    +6
    +31
    +5
    +4
    +7
    +3
    +6
    +5
    +6
    +5
    +11
    +2
    +2
    +11
    +11
    +6
    +7
    +5
    +8
    +7
    +10
    +5
    +23
    +7
    +4
    +3
    +5
    +34
    +2
    +5
    +23
    +7
    +3
    +6
    +8
    +4
    +4
    +4
    +2
    +5
    +3
    +8
    +5
    +4
    +8
    +25
    +2
    +3
    +17
    +8
    +3
    +4
    +8
    +7
    +3
    +15
    +6
    +5
    +7
    +21
    +9
    +5
    +6
    +6
    +5
    +3
    +2
    +3
    +10
    +3
    +6
    +3
    +14
    +7
    +4
    +4
    +8
    +7
    +8
    +2
    +6
    +12
    +4
    +213
    +6
    +5
    +21
    +8
    +2
    +5
    +23
    +3
    +11
    +2
    +3
    +6
    +25
    +2
    +3
    +6
    +7
    +6
    +6
    +4
    +4
    +6
    +3
    +17
    +9
    +7
    +6
    +4
    +3
    +10
    +7
    +2
    +3
    +3
    +3
    +11
    +8
    +3
    +7
    +6
    +4
    +14
    +36
    +3
    +4
    +3
    +3
    +22
    +13
    +21
    +4
    +2
    +7
    +4
    +4
    +17
    +15
    +3
    +7
    +11
    +2
    +4
    +7
    +6
    +209
    +6
    +3
    +2
    +2
    +24
    +4
    +9
    +4
    +3
    +3
    +3
    +29
    +2
    +2
    +4
    +3
    +3
    +5
    +4
    +6
    +3
    +3
    +2
    +4
    diff --git a/src/prometheus/vendor/github.com/beorn7/perks/quantile/stream.go b/src/prometheus/vendor/github.com/beorn7/perks/quantile/stream.go
    new file mode 100644
    index 0000000..587b1fc
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/beorn7/perks/quantile/stream.go
    @@ -0,0 +1,292 @@
    +// Package quantile computes approximate quantiles over an unbounded data
    +// stream within low memory and CPU bounds.
    +//
    +// A small amount of accuracy is traded to achieve the above properties.
    +//
    +// Multiple streams can be merged before calling Query to generate a single set
    +// of results. This is meaningful when the streams represent the same type of
    +// data. See Merge and Samples.
    +//
    +// For more detailed information about the algorithm used, see:
    +//
    +// Effective Computation of Biased Quantiles over Data Streams
    +//
    +// http://www.cs.rutgers.edu/~muthu/bquant.pdf
    +package quantile
    +
    +import (
    +	"math"
    +	"sort"
    +)
    +
    +// Sample holds an observed value and meta information for compression. JSON
    +// tags have been added for convenience.
    +type Sample struct {
    +	Value float64 `json:",string"`
    +	Width float64 `json:",string"`
    +	Delta float64 `json:",string"`
    +}
    +
    +// Samples represents a slice of samples. It implements sort.Interface.
    +type Samples []Sample
    +
    +func (a Samples) Len() int           { return len(a) }
    +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
    +func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
    +
    +type invariant func(s *stream, r float64) float64
    +
    +// NewLowBiased returns an initialized Stream for low-biased quantiles
    +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
    +// error guarantees can still be given even for the lower ranks of the data
    +// distribution.
    +//
    +// The provided epsilon is a relative error, i.e. the true quantile of a value
    +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
    +//
    +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
    +// properties.
    +func NewLowBiased(epsilon float64) *Stream {
    +	ƒ := func(s *stream, r float64) float64 {
    +		return 2 * epsilon * r
    +	}
    +	return newStream(ƒ)
    +}
    +
    +// NewHighBiased returns an initialized Stream for high-biased quantiles
    +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
    +// error guarantees can still be given even for the higher ranks of the data
    +// distribution.
    +//
    +// The provided epsilon is a relative error, i.e. the true quantile of a value
    +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
    +//
    +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
    +// properties.
    +func NewHighBiased(epsilon float64) *Stream {
    +	ƒ := func(s *stream, r float64) float64 {
    +		return 2 * epsilon * (s.n - r)
    +	}
    +	return newStream(ƒ)
    +}
    +
    +// NewTargeted returns an initialized Stream concerned with a particular set of
    +// quantile values that are supplied a priori. Knowing these a priori reduces
    +// space and computation time. The targets map maps the desired quantiles to
    +// their absolute errors, i.e. the true quantile of a value returned by a query
    +// is guaranteed to be within (Quantile±Epsilon).
    +//
    +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
    +func NewTargeted(targets map[float64]float64) *Stream {
    +	ƒ := func(s *stream, r float64) float64 {
    +		var m = math.MaxFloat64
    +		var f float64
    +		for quantile, epsilon := range targets {
    +			if quantile*s.n <= r {
    +				f = (2 * epsilon * r) / quantile
    +			} else {
    +				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
    +			}
    +			if f < m {
    +				m = f
    +			}
    +		}
    +		return m
    +	}
    +	return newStream(ƒ)
    +}
    +
    +// Stream computes quantiles for a stream of float64s. It is not thread-safe by
    +// design. Take care when using across multiple goroutines.
    +type Stream struct {
    +	*stream
    +	b      Samples
    +	sorted bool
    +}
    +
    +func newStream(ƒ invariant) *Stream {
    +	x := &stream{ƒ: ƒ}
    +	return &Stream{x, make(Samples, 0, 500), true}
    +}
    +
    +// Insert inserts v into the stream.
    +func (s *Stream) Insert(v float64) {
    +	s.insert(Sample{Value: v, Width: 1})
    +}
    +
    +func (s *Stream) insert(sample Sample) {
    +	s.b = append(s.b, sample)
    +	s.sorted = false
    +	if len(s.b) == cap(s.b) {
    +		s.flush()
    +	}
    +}
    +
    +// Query returns the computed qth percentiles value. If s was created with
    +// NewTargeted, and q is not in the set of quantiles provided a priori, Query
    +// will return an unspecified result.
    +func (s *Stream) Query(q float64) float64 {
    +	if !s.flushed() {
    +		// Fast path when there hasn't been enough data for a flush;
    +		// this also yields better accuracy for small sets of data.
    +		l := len(s.b)
    +		if l == 0 {
    +			return 0
    +		}
    +		i := int(float64(l) * q)
    +		if i > 0 {
    +			i -= 1
    +		}
    +		s.maybeSort()
    +		return s.b[i].Value
    +	}
    +	s.flush()
    +	return s.stream.query(q)
    +}
    +
    +// Merge merges samples into the underlying streams samples. This is handy when
    +// merging multiple streams from separate threads, database shards, etc.
    +//
    +// ATTENTION: This method is broken and does not yield correct results. The
    +// underlying algorithm is not capable of merging streams correctly.
    +func (s *Stream) Merge(samples Samples) {
    +	sort.Sort(samples)
    +	s.stream.merge(samples)
    +}
    +
    +// Reset reinitializes and clears the list reusing the samples buffer memory.
    +func (s *Stream) Reset() {
    +	s.stream.reset()
    +	s.b = s.b[:0]
    +}
    +
    +// Samples returns stream samples held by s.
    +func (s *Stream) Samples() Samples {
    +	if !s.flushed() {
    +		return s.b
    +	}
    +	s.flush()
    +	return s.stream.samples()
    +}
    +
    +// Count returns the total number of samples observed in the stream
    +// since initialization.
    +func (s *Stream) Count() int {
    +	return len(s.b) + s.stream.count()
    +}
    +
    +func (s *Stream) flush() {
    +	s.maybeSort()
    +	s.stream.merge(s.b)
    +	s.b = s.b[:0]
    +}
    +
    +func (s *Stream) maybeSort() {
    +	if !s.sorted {
    +		s.sorted = true
    +		sort.Sort(s.b)
    +	}
    +}
    +
    +func (s *Stream) flushed() bool {
    +	return len(s.stream.l) > 0
    +}
    +
    +type stream struct {
    +	n float64
    +	l []Sample
    +	ƒ invariant
    +}
    +
    +func (s *stream) reset() {
    +	s.l = s.l[:0]
    +	s.n = 0
    +}
    +
    +func (s *stream) insert(v float64) {
    +	s.merge(Samples{{v, 1, 0}})
    +}
    +
    +func (s *stream) merge(samples Samples) {
    +	// TODO(beorn7): This tries to merge not only individual samples, but
    +	// whole summaries. The paper doesn't mention merging summaries at
    +	// all. Unittests show that the merging is inaccurate. Find out how to
    +	// do merges properly.
    +	var r float64
    +	i := 0
    +	for _, sample := range samples {
    +		for ; i < len(s.l); i++ {
    +			c := s.l[i]
    +			if c.Value > sample.Value {
    +				// Insert at position i.
    +				s.l = append(s.l, Sample{})
    +				copy(s.l[i+1:], s.l[i:])
    +				s.l[i] = Sample{
    +					sample.Value,
    +					sample.Width,
    +					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
    +					// TODO(beorn7): How to calculate delta correctly?
    +				}
    +				i++
    +				goto inserted
    +			}
    +			r += c.Width
    +		}
    +		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
    +		i++
    +	inserted:
    +		s.n += sample.Width
    +		r += sample.Width
    +	}
    +	s.compress()
    +}
    +
    +func (s *stream) count() int {
    +	return int(s.n)
    +}
    +
    +func (s *stream) query(q float64) float64 {
    +	t := math.Ceil(q * s.n)
    +	t += math.Ceil(s.ƒ(s, t) / 2)
    +	p := s.l[0]
    +	var r float64
    +	for _, c := range s.l[1:] {
    +		r += p.Width
    +		if r+c.Width+c.Delta > t {
    +			return p.Value
    +		}
    +		p = c
    +	}
    +	return p.Value
    +}
    +
    +func (s *stream) compress() {
    +	if len(s.l) < 2 {
    +		return
    +	}
    +	x := s.l[len(s.l)-1]
    +	xi := len(s.l) - 1
    +	r := s.n - 1 - x.Width
    +
    +	for i := len(s.l) - 2; i >= 0; i-- {
    +		c := s.l[i]
    +		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
    +			x.Width += c.Width
    +			s.l[xi] = x
    +			// Remove element at i.
    +			copy(s.l[i:], s.l[i+1:])
    +			s.l = s.l[:len(s.l)-1]
    +			xi -= 1
    +		} else {
    +			x = c
    +			xi = i
    +		}
    +		r -= c.Width
    +	}
    +}
    +
    +func (s *stream) samples() Samples {
    +	samples := make(Samples, len(s.l))
    +	copy(samples, s.l)
    +	return samples
    +}
    diff --git a/src/prometheus/vendor/github.com/cespare/xxhash/LICENSE.txt b/src/prometheus/vendor/github.com/cespare/xxhash/LICENSE.txt
    new file mode 100644
    index 0000000..24b5306
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cespare/xxhash/LICENSE.txt
    @@ -0,0 +1,22 @@
    +Copyright (c) 2016 Caleb Spare
    +
    +MIT License
    +
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of this software and associated documentation files (the
    +"Software"), to deal in the Software without restriction, including
    +without limitation the rights to use, copy, modify, merge, publish,
    +distribute, sublicense, and/or sell copies of the Software, and to
    +permit persons to whom the Software is furnished to do so, subject to
    +the following conditions:
    +
    +The above copyright notice and this permission notice shall be
    +included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    diff --git a/src/prometheus/vendor/github.com/cespare/xxhash/README.md b/src/prometheus/vendor/github.com/cespare/xxhash/README.md
    new file mode 100644
    index 0000000..8a555d5
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cespare/xxhash/README.md
    @@ -0,0 +1,34 @@
    +# xxhash
    +
    +[![GoDoc](https://godoc.org/github.com/cespare/mph?status.svg)](https://godoc.org/github.com/cespare/xxhash)
    +
    +xxhash is a Go implementation of the 64-bit
    +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
    +high-quality hashing algorithm that is much faster than anything in the Go
    +standard library.
    +
    +The API is very small, taking its cue from the other hashing packages in the
    +standard library:
    +
    +    $ go doc github.com/cespare/xxhash                                                                                                                                                                                              !
    +    package xxhash // import "github.com/cespare/xxhash"
    +
    +    Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
    +    at http://cyan4973.github.io/xxHash/.
    +
    +    func New() hash.Hash64
    +    func Sum64(b []byte) uint64
    +
    +This implementation provides a fast pure-Go implementation and an even faster
    +assembly implementation for amd64.
    +
    +Here are some quick benchmarks comparing the pure-Go and assembly
    +implementations of Sum64 against another popular Go XXH64 implementation,
    +[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
    +
    +| input size | OneOfOne | cespare (noasm) | cespare |
    +| --- | --- | --- | --- |
    +| 5 B   |  438.34 MB/s |  596.40 MB/s |  711.11 MB/s  |
    +| 100 B | 3676.54 MB/s | 4301.40 MB/s | 4598.95 MB/s  |
    +| 4 KB  | 8128.64 MB/s | 8840.83 MB/s | 10549.72 MB/s |
    +| 10 MB | 7335.19 MB/s | 7736.64 MB/s | 9024.04 MB/s  |
    diff --git a/src/prometheus/vendor/github.com/cespare/xxhash/xxhash.go b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash.go
    new file mode 100644
    index 0000000..aeacdd1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash.go
    @@ -0,0 +1,180 @@
    +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
    +// at http://cyan4973.github.io/xxHash/.
    +package xxhash
    +
    +import (
    +	"encoding/binary"
    +	"hash"
    +)
    +
    +const (
    +	prime1 uint64 = 11400714785074694791
    +	prime2 uint64 = 14029467366897019727
    +	prime3 uint64 = 1609587929392839161
    +	prime4 uint64 = 9650029242287828579
    +	prime5 uint64 = 2870177450012600261
    +)
    +
    +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
    +// possible in the Go code is worth a small (but measurable) performance boost
    +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
    +// convenience in the Go code in a few places where we need to intentionally
    +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
    +// result overflows a uint64).
    +var (
    +	prime1v = prime1
    +	prime2v = prime2
    +	prime3v = prime3
    +	prime4v = prime4
    +	prime5v = prime5
    +)
    +
    +type xxh struct {
    +	v1    uint64
    +	v2    uint64
    +	v3    uint64
    +	v4    uint64
    +	total int
    +	mem   [32]byte
    +	n     int // how much of mem is used
    +}
    +
    +// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
    +func New() hash.Hash64 {
    +	var x xxh
    +	x.Reset()
    +	return &x
    +}
    +
    +func (x *xxh) Reset() {
    +	x.n = 0
    +	x.total = 0
    +	x.v1 = prime1v + prime2
    +	x.v2 = prime2
    +	x.v3 = 0
    +	x.v4 = -prime1v
    +}
    +
    +func (x *xxh) Size() int      { return 8 }
    +func (x *xxh) BlockSize() int { return 32 }
    +
    +// Write adds more data to x. It always returns len(b), nil.
    +func (x *xxh) Write(b []byte) (n int, err error) {
    +	n = len(b)
    +	x.total += len(b)
    +
    +	if x.n+len(b) < 32 {
    +		// This new data doesn't even fill the current block.
    +		copy(x.mem[x.n:], b)
    +		x.n += len(b)
    +		return
    +	}
    +
    +	if x.n > 0 {
    +		// Finish off the partial block.
    +		copy(x.mem[x.n:], b)
    +		x.v1 = round(x.v1, u64(x.mem[0:8]))
    +		x.v2 = round(x.v2, u64(x.mem[8:16]))
    +		x.v3 = round(x.v3, u64(x.mem[16:24]))
    +		x.v4 = round(x.v4, u64(x.mem[24:32]))
    +		b = b[32-x.n:]
    +		x.n = 0
    +	}
    +
    +	if len(b) >= 32 {
    +		// One or more full blocks left.
    +		b = writeBlocks(x, b)
    +	}
    +
    +	// Store any remaining partial block.
    +	copy(x.mem[:], b)
    +	x.n = len(b)
    +
    +	return
    +}
    +
    +func (x *xxh) Sum(b []byte) []byte {
    +	s := x.Sum64()
    +	return append(
    +		b,
    +		byte(s>>56),
    +		byte(s>>48),
    +		byte(s>>40),
    +		byte(s>>32),
    +		byte(s>>24),
    +		byte(s>>16),
    +		byte(s>>8),
    +		byte(s),
    +	)
    +}
    +
    +func (x *xxh) Sum64() uint64 {
    +	var h uint64
    +
    +	if x.total >= 32 {
    +		v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
    +		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
    +		h = mergeRound(h, v1)
    +		h = mergeRound(h, v2)
    +		h = mergeRound(h, v3)
    +		h = mergeRound(h, v4)
    +	} else {
    +		h = x.v3 + prime5
    +	}
    +
    +	h += uint64(x.total)
    +
    +	i, end := 0, x.n
    +	for ; i+8 <= end; i += 8 {
    +		k1 := round(0, u64(x.mem[i:i+8]))
    +		h ^= k1
    +		h = rol27(h)*prime1 + prime4
    +	}
    +	if i+4 <= end {
    +		h ^= uint64(u32(x.mem[i:i+4])) * prime1
    +		h = rol23(h)*prime2 + prime3
    +		i += 4
    +	}
    +	for i < end {
    +		h ^= uint64(x.mem[i]) * prime5
    +		h = rol11(h) * prime1
    +		i++
    +	}
    +
    +	h ^= h >> 33
    +	h *= prime2
    +	h ^= h >> 29
    +	h *= prime3
    +	h ^= h >> 32
    +
    +	return h
    +}
    +
    +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
    +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
    +
    +func round(acc, input uint64) uint64 {
    +	acc += input * prime2
    +	acc = rol31(acc)
    +	acc *= prime1
    +	return acc
    +}
    +
    +func mergeRound(acc, val uint64) uint64 {
    +	val = round(0, val)
    +	acc ^= val
    +	acc = acc*prime1 + prime4
    +	return acc
    +}
    +
    +// It's important for performance to get the rotates to actually compile to
    +// ROLQs. gc will do this for us but only if rotate amount is a constant.
    +
    +func rol1(x uint64) uint64  { return (x << 1) | (x >> (64 - 1)) }
    +func rol7(x uint64) uint64  { return (x << 7) | (x >> (64 - 7)) }
    +func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
    +func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
    +func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
    +func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
    +func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
    +func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
    diff --git a/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.go
    new file mode 100644
    index 0000000..fc417c1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.go
    @@ -0,0 +1,12 @@
    +// +build !appengine
    +// +build gc
    +// +build !noasm
    +
    +package xxhash
    +
    +// Sum64 computes the 64-bit xxHash digest of b.
    +//
    +//go:noescape
    +func Sum64(b []byte) uint64
    +
    +func writeBlocks(x *xxh, b []byte) []byte
    diff --git a/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.s
    new file mode 100644
    index 0000000..37aa8f0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_amd64.s
    @@ -0,0 +1,233 @@
    +// +build !appengine
    +// +build gc
    +// +build !noasm
    +
    +#include "textflag.h"
    +
    +// Register allocation:
    +// AX	h
    +// CX	pointer to advance through b
    +// DX	n
    +// BX	loop end
    +// R8	v1, k1
    +// R9	v2
    +// R10	v3
    +// R11	v4
    +// R12	tmp
    +// R13	prime1v
    +// R14	prime2v
    +// R15	prime4v
    +
    +// round reads from and advances the buffer pointer in CX.
    +// It assumes that R13 has prime1v and R14 has prime2v.
    +#define round(r) \
    +	MOVQ  (CX), R12 \
    +	ADDQ  $8, CX    \
    +	IMULQ R14, R12  \
    +	ADDQ  R12, r    \
    +	ROLQ  $31, r    \
    +	IMULQ R13, r
    +
    +// mergeRound applies a merge round on the two registers acc and val.
    +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
    +#define mergeRound(acc, val) \
    +	IMULQ R14, val \
    +	ROLQ  $31, val \
    +	IMULQ R13, val \
    +	XORQ  val, acc \
    +	IMULQ R13, acc \
    +	ADDQ  R15, acc
    +
    +// func Sum64(b []byte) uint64
    +TEXT ·Sum64(SB), NOSPLIT, $0-32
    +	// Load fixed primes.
    +	MOVQ ·prime1v(SB), R13
    +	MOVQ ·prime2v(SB), R14
    +	MOVQ ·prime4v(SB), R15
    +
    +	// Load slice.
    +	MOVQ b_base+0(FP), CX
    +	MOVQ b_len+8(FP), DX
    +	LEAQ (CX)(DX*1), BX
    +
    +	// The first loop limit will be len(b)-32.
    +	SUBQ $32, BX
    +
    +	// Check whether we have at least one block.
    +	CMPQ DX, $32
    +	JLT  noBlocks
    +
    +	// Set up initial state (v1, v2, v3, v4).
    +	MOVQ R13, R8
    +	ADDQ R14, R8
    +	MOVQ R14, R9
    +	XORQ R10, R10
    +	XORQ R11, R11
    +	SUBQ R13, R11
    +
    +	// Loop until CX > BX.
    +blockLoop:
    +	round(R8)
    +	round(R9)
    +	round(R10)
    +	round(R11)
    +
    +	CMPQ CX, BX
    +	JLE  blockLoop
    +
    +	MOVQ R8, AX
    +	ROLQ $1, AX
    +	MOVQ R9, R12
    +	ROLQ $7, R12
    +	ADDQ R12, AX
    +	MOVQ R10, R12
    +	ROLQ $12, R12
    +	ADDQ R12, AX
    +	MOVQ R11, R12
    +	ROLQ $18, R12
    +	ADDQ R12, AX
    +
    +	mergeRound(AX, R8)
    +	mergeRound(AX, R9)
    +	mergeRound(AX, R10)
    +	mergeRound(AX, R11)
    +
    +	JMP afterBlocks
    +
    +noBlocks:
    +	MOVQ ·prime5v(SB), AX
    +
    +afterBlocks:
    +	ADDQ DX, AX
    +
    +	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
    +	ADDQ $24, BX
    +
    +	CMPQ CX, BX
    +	JG   fourByte
    +
    +wordLoop:
    +	// Calculate k1.
    +	MOVQ  (CX), R8
    +	ADDQ  $8, CX
    +	IMULQ R14, R8
    +	ROLQ  $31, R8
    +	IMULQ R13, R8
    +
    +	XORQ  R8, AX
    +	ROLQ  $27, AX
    +	IMULQ R13, AX
    +	ADDQ  R15, AX
    +
    +	CMPQ CX, BX
    +	JLE  wordLoop
    +
    +fourByte:
    +	ADDQ $4, BX
    +	CMPQ CX, BX
    +	JG   singles
    +
    +	MOVL  (CX), R8
    +	ADDQ  $4, CX
    +	IMULQ R13, R8
    +	XORQ  R8, AX
    +
    +	ROLQ  $23, AX
    +	IMULQ R14, AX
    +	ADDQ  ·prime3v(SB), AX
    +
    +singles:
    +	ADDQ $4, BX
    +	CMPQ CX, BX
    +	JGE  finalize
    +
    +singlesLoop:
    +	MOVBQZX (CX), R12
    +	ADDQ    $1, CX
    +	IMULQ   ·prime5v(SB), R12
    +	XORQ    R12, AX
    +
    +	ROLQ  $11, AX
    +	IMULQ R13, AX
    +
    +	CMPQ CX, BX
    +	JL   singlesLoop
    +
    +finalize:
    +	MOVQ  AX, R12
    +	SHRQ  $33, R12
    +	XORQ  R12, AX
    +	IMULQ R14, AX
    +	MOVQ  AX, R12
    +	SHRQ  $29, R12
    +	XORQ  R12, AX
    +	IMULQ ·prime3v(SB), AX
    +	MOVQ  AX, R12
    +	SHRQ  $32, R12
    +	XORQ  R12, AX
    +
    +	MOVQ AX, ret+24(FP)
    +	RET
    +
    +// writeBlocks uses the same registers as above except that it uses AX to store
    +// the x pointer.
    +
    +// func writeBlocks(x *xxh, b []byte) []byte
    +TEXT ·writeBlocks(SB), NOSPLIT, $0-56
    +	// Load fixed primes needed for round.
    +	MOVQ ·prime1v(SB), R13
    +	MOVQ ·prime2v(SB), R14
    +
    +	// Load slice.
    +	MOVQ b_base+8(FP), CX
    +	MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
    +	MOVQ b_len+16(FP), DX
    +	LEAQ (CX)(DX*1), BX
    +	SUBQ $32, BX
    +
    +	// Load vN from x.
    +	MOVQ x+0(FP), AX
    +	MOVQ 0(AX), R8   // v1
    +	MOVQ 8(AX), R9   // v2
    +	MOVQ 16(AX), R10 // v3
    +	MOVQ 24(AX), R11 // v4
    +
    +	// We don't need to check the loop condition here; this function is
    +	// always called with at least one block of data to process.
    +blockLoop:
    +	round(R8)
    +	round(R9)
    +	round(R10)
    +	round(R11)
    +
    +	CMPQ CX, BX
    +	JLE  blockLoop
    +
    +	// Copy vN back to x.
    +	MOVQ R8, 0(AX)
    +	MOVQ R9, 8(AX)
    +	MOVQ R10, 16(AX)
    +	MOVQ R11, 24(AX)
    +
    +	// Construct return slice.
    +	// NOTE: It's important that we don't construct a slice that has a base
    +	// pointer off the end of the original slice, as in Go 1.7+ this will
    +	// cause runtime crashes. (See discussion in, for example,
    +	// https://github.com/golang/go/issues/16772.)
    +	// Therefore, we calculate the length/cap first, and if they're zero, we
    +	// keep the old base. This is what the compiler does as well if you
    +	// write code like
    +	//   b = b[len(b):]
    +
    +	// New length is 32 - (CX - BX) -> BX+32 - CX.
    +	ADDQ $32, BX
    +	SUBQ CX, BX
    +	JZ   afterSetBase
    +
    +	MOVQ CX, ret_base+32(FP)
    +
    +afterSetBase:
    +	MOVQ BX, ret_len+40(FP)
    +	MOVQ BX, ret_cap+48(FP) // set cap == len
    +
    +	RET
    diff --git a/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_other.go b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_other.go
    new file mode 100644
    index 0000000..058c315
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cespare/xxhash/xxhash_other.go
    @@ -0,0 +1,75 @@
    +// +build !amd64 appengine !gc noasm
    +
    +package xxhash
    +
    +// Sum64 computes the 64-bit xxHash digest of b.
    +func Sum64(b []byte) uint64 {
    +	// A simpler version would be
    +	//   x := New()
    +	//   x.Write(b)
    +	//   return x.Sum64()
    +	// but this is faster, particularly for small inputs.
    +
    +	n := len(b)
    +	var h uint64
    +
    +	if n >= 32 {
    +		v1 := prime1v + prime2
    +		v2 := prime2
    +		v3 := uint64(0)
    +		v4 := -prime1v
    +		for len(b) >= 32 {
    +			v1 = round(v1, u64(b[0:8:len(b)]))
    +			v2 = round(v2, u64(b[8:16:len(b)]))
    +			v3 = round(v3, u64(b[16:24:len(b)]))
    +			v4 = round(v4, u64(b[24:32:len(b)]))
    +			b = b[32:len(b):len(b)]
    +		}
    +		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
    +		h = mergeRound(h, v1)
    +		h = mergeRound(h, v2)
    +		h = mergeRound(h, v3)
    +		h = mergeRound(h, v4)
    +	} else {
    +		h = prime5
    +	}
    +
    +	h += uint64(n)
    +
    +	i, end := 0, len(b)
    +	for ; i+8 <= end; i += 8 {
    +		k1 := round(0, u64(b[i:i+8:len(b)]))
    +		h ^= k1
    +		h = rol27(h)*prime1 + prime4
    +	}
    +	if i+4 <= end {
    +		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
    +		h = rol23(h)*prime2 + prime3
    +		i += 4
    +	}
    +	for ; i < end; i++ {
    +		h ^= uint64(b[i]) * prime5
    +		h = rol11(h) * prime1
    +	}
    +
    +	h ^= h >> 33
    +	h *= prime2
    +	h ^= h >> 29
    +	h *= prime3
    +	h ^= h >> 32
    +
    +	return h
    +}
    +
    +func writeBlocks(x *xxh, b []byte) []byte {
    +	v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
    +	for len(b) >= 32 {
    +		v1 = round(v1, u64(b[0:8:len(b)]))
    +		v2 = round(v2, u64(b[8:16:len(b)]))
    +		v3 = round(v3, u64(b[16:24:len(b)]))
    +		v4 = round(v4, u64(b[24:32:len(b)]))
    +		b = b[32:len(b):len(b)]
    +	}
    +	x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
    +	return b
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/CONTRIBUTORS b/src/prometheus/vendor/github.com/cockroachdb/cmux/CONTRIBUTORS
    new file mode 100644
    index 0000000..1b73178
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/CONTRIBUTORS
    @@ -0,0 +1,11 @@
    +# The list of people who have contributed code to the cmux repository.
    +#
    +# Auto-generated with:
    +#		git log --oneline --pretty=format:'%an <%aE>' | sort -u
    +#
    +Dmitri Shuralyov 
    +Ethan Mosbaugh 
    +Soheil Hassas Yeganeh 
    +Soheil Hassas Yeganeh 
    +Tamir Duberstein 
    +Tamir Duberstein 
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/LICENSE b/src/prometheus/vendor/github.com/cockroachdb/cmux/LICENSE
    new file mode 100644
    index 0000000..d645695
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/LICENSE
    @@ -0,0 +1,202 @@
    +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright [yyyy] [name of copyright owner]
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/README.md b/src/prometheus/vendor/github.com/cockroachdb/cmux/README.md
    new file mode 100644
    index 0000000..26e1737
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/README.md
    @@ -0,0 +1,72 @@
    +# cmux: Connection Mux [![Build Status](https://travis-ci.org/cockroachdb/cmux.svg?branch=master)](https://travis-ci.org/cockroachdb/cmux) [![GoDoc](https://godoc.org/github.com/cockroachdb/cmux?status.svg)](https://godoc.org/github.com/cockroachdb/cmux)
    +
    +cmux is a generic Go library to multiplex connections based on their payload.
    +Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, Go RPC, and pretty much any
    +other protocol on the same TCP listener.
    +
    +## How-To
    +Simply create your main listener, create a cmux for that listener,
    +and then match connections:
    +```go
    +// Create the main listener.
    +l, err := net.Listen("tcp", ":23456")
    +if err != nil {
    +	log.Fatal(err)
    +}
    +
    +// Create a cmux.
    +m := cmux.New(l)
    +
    +// Match connections in order:
    +// First grpc, then HTTP, and otherwise Go RPC/TCP.
    +grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
    +httpL := m.Match(cmux.HTTP1Fast())
    +trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.
    +
    +// Create your protocol servers.
    +grpcS := grpc.NewServer()
    +grpchello.RegisterGreeterServer(grpcS, &server{})
    +
    +httpS := &http.Server{
    +	Handler: &helloHTTP1Handler{},
    +}
    +
    +trpcS := rpc.NewServer()
    +trpcS.Register(&ExampleRPCRcvr{})
    +
    +// Use the muxed listeners for your servers.
    +go grpcS.Serve(grpcL)
    +go httpS.Serve(httpL)
    +go trpcS.Accept(trpcL)
    +
    +// Start serving!
    +m.Serve()
    +```
    +
    +There are [more examples on GoDoc](https://godoc.org/github.com/cockroachdb/cmux#pkg-examples).
    +
    +## Performance
    +Since we are only matching the very first bytes of a connection, the
    +performance overhead on long-lived connections (i.e., RPCs and pipelined HTTP
    +streams) is negligible.
    +
    +## Limitations
    +* *TLS*: `net/http` uses a [type assertion](https://github.com/golang/go/issues/14221)
    +to identify TLS connections; since cmux's lookahead-implementing connection
    +wraps the underlying TLS connection, this type assertion fails. This means you
    +can serve HTTPS using cmux but `http.Request.TLS` will not be set in your
    +handlers. If you are able to wrap TLS around cmux, you can work around this
    +limitation. See https://github.com/cockroachdb/cockroach/commit/83caba2 for an
    +example of this approach.
    +
    +* *Different Protocols on The Same Connection*: `cmux` matches the connection
    +when it's accepted. For example, one connection can be either gRPC or REST, but
    +not both. That is, we assume that a client connection is either used for gRPC
    +or REST.
    +
    +# Copyright and License
    +Copyright 2016 The CMux Authors. All rights reserved.
    +
    +See [CONTRIBUTORS](https://github.com/cockroachdb/cmux/blob/master/CONTRIBUTORS)
    +for the CMux Authors. Code is released under
    +[the Apache 2 license](https://github.com/cockroachdb/cmux/blob/master/LICENSE).
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/buffer.go b/src/prometheus/vendor/github.com/cockroachdb/cmux/buffer.go
    new file mode 100644
    index 0000000..dc4d992
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/buffer.go
    @@ -0,0 +1,49 @@
    +// Copyright 2016 The CMux Authors. All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +
    +package cmux
    +
    +import (
    +	"bytes"
    +	"io"
    +)
    +
    +// bufferedReader is an optimized implementation of io.Reader that behaves like
    +// ```
    +// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer))
    +// ```
    +// without allocating.
    +type bufferedReader struct {
    +	source     io.Reader
    +	buffer     *bytes.Buffer
    +	bufferRead int
    +	bufferSize int
    +}
    +
    +func (s *bufferedReader) Read(p []byte) (int, error) {
    +	// Functionality of bytes.Reader.
    +	bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize])
    +	s.bufferRead += bn
    +
    +	p = p[bn:]
    +
    +	// Functionality of io.TeeReader.
    +	sn, sErr := s.source.Read(p)
    +	if sn > 0 {
    +		if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil {
    +			return bn + wn, wErr
    +		}
    +	}
    +	return bn + sn, sErr
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/cmux.go b/src/prometheus/vendor/github.com/cockroachdb/cmux/cmux.go
    new file mode 100644
    index 0000000..f9787fd
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/cmux.go
    @@ -0,0 +1,224 @@
    +// Copyright 2016 The CMux Authors. All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +
    +package cmux
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"net"
    +	"sync"
    +)
    +
    +// Matcher matches a connection based on its content.
    +type Matcher func(io.Reader) bool
    +
    +// ErrorHandler handles an error and returns whether
    +// the mux should continue serving the listener.
    +type ErrorHandler func(error) bool
    +
    +var _ net.Error = ErrNotMatched{}
    +
    +// ErrNotMatched is returned whenever a connection is not matched by any of
    +// the matchers registered in the multiplexer.
    +type ErrNotMatched struct {
    +	c net.Conn
    +}
    +
    +func (e ErrNotMatched) Error() string {
    +	return fmt.Sprintf("mux: connection %v not matched by an matcher",
    +		e.c.RemoteAddr())
    +}
    +
    +// Temporary implements the net.Error interface.
    +func (e ErrNotMatched) Temporary() bool { return true }
    +
    +// Timeout implements the net.Error interface.
    +func (e ErrNotMatched) Timeout() bool { return false }
    +
    +type errListenerClosed string
    +
    +func (e errListenerClosed) Error() string   { return string(e) }
    +func (e errListenerClosed) Temporary() bool { return false }
    +func (e errListenerClosed) Timeout() bool   { return false }
    +
    +// ErrListenerClosed is returned from muxListener.Accept when the underlying
    +// listener is closed.
    +var ErrListenerClosed = errListenerClosed("mux: listener closed")
    +
    +// New instantiates a new connection multiplexer.
    +func New(l net.Listener) CMux {
    +	return &cMux{
    +		root:   l,
    +		bufLen: 1024,
    +		errh:   func(_ error) bool { return true },
    +		donec:  make(chan struct{}),
    +	}
    +}
    +
    +// CMux is a multiplexer for network connections.
    +type CMux interface {
    +	// Match returns a net.Listener that sees (i.e., accepts) only
    +	// the connections matched by at least one of the matcher.
    +	//
    +	// The order used to call Match determines the priority of matchers.
    +	Match(...Matcher) net.Listener
    +	// Serve starts multiplexing the listener. Serve blocks and perhaps
    +	// should be invoked concurrently within a go routine.
    +	Serve() error
    +	// HandleError registers an error handler that handles listener errors.
    +	HandleError(ErrorHandler)
    +}
    +
    +type matchersListener struct {
    +	ss []Matcher
    +	l  muxListener
    +}
    +
    +type cMux struct {
    +	root   net.Listener
    +	bufLen int
    +	errh   ErrorHandler
    +	donec  chan struct{}
    +	sls    []matchersListener
    +}
    +
    +func (m *cMux) Match(matchers ...Matcher) net.Listener {
    +	ml := muxListener{
    +		Listener: m.root,
    +		connc:    make(chan net.Conn, m.bufLen),
    +	}
    +	m.sls = append(m.sls, matchersListener{ss: matchers, l: ml})
    +	return ml
    +}
    +
    +func (m *cMux) Serve() error {
    +	var wg sync.WaitGroup
    +
    +	defer func() {
    +		close(m.donec)
    +		wg.Wait()
    +
    +		for _, sl := range m.sls {
    +			close(sl.l.connc)
    +			// Drain the connections enqueued for the listener.
    +			for c := range sl.l.connc {
    +				_ = c.Close()
    +			}
    +		}
    +	}()
    +
    +	for {
    +		c, err := m.root.Accept()
    +		if err != nil {
    +			if !m.handleErr(err) {
    +				return err
    +			}
    +			continue
    +		}
    +
    +		wg.Add(1)
    +		go m.serve(c, m.donec, &wg)
    +	}
    +}
    +
    +func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) {
    +	defer wg.Done()
    +
    +	muc := newMuxConn(c)
    +	for _, sl := range m.sls {
    +		for _, s := range sl.ss {
    +			matched := s(muc.getSniffer())
    +			if matched {
    +				select {
    +				case sl.l.connc <- muc:
    +				case <-donec:
    +					_ = c.Close()
    +				}
    +				return
    +			}
    +		}
    +	}
    +
    +	_ = c.Close()
    +	err := ErrNotMatched{c: c}
    +	if !m.handleErr(err) {
    +		_ = m.root.Close()
    +	}
    +}
    +
    +func (m *cMux) HandleError(h ErrorHandler) {
    +	m.errh = h
    +}
    +
    +func (m *cMux) handleErr(err error) bool {
    +	if !m.errh(err) {
    +		return false
    +	}
    +
    +	if ne, ok := err.(net.Error); ok {
    +		return ne.Temporary()
    +	}
    +
    +	return false
    +}
    +
    +type muxListener struct {
    +	net.Listener
    +	connc chan net.Conn
    +}
    +
    +func (l muxListener) Accept() (net.Conn, error) {
    +	c, ok := <-l.connc
    +	if !ok {
    +		return nil, ErrListenerClosed
    +	}
    +	return c, nil
    +}
    +
    +// MuxConn wraps a net.Conn and provides transparent sniffing of connection data.
    +type MuxConn struct {
    +	net.Conn
    +	buf     bytes.Buffer
    +	sniffer bufferedReader
    +}
    +
    +func newMuxConn(c net.Conn) *MuxConn {
    +	return &MuxConn{
    +		Conn: c,
    +	}
    +}
    +
    +// From the io.Reader documentation:
    +//
    +// When Read encounters an error or end-of-file condition after
    +// successfully reading n > 0 bytes, it returns the number of
    +// bytes read.  It may return the (non-nil) error from the same call
    +// or return the error (and n == 0) from a subsequent call.
    +// An instance of this general case is that a Reader returning
    +// a non-zero number of bytes at the end of the input stream may
    +// return either err == EOF or err == nil.  The next Read should
    +// return 0, EOF.
    +func (m *MuxConn) Read(p []byte) (int, error) {
    +	if n, err := m.buf.Read(p); err != io.EOF {
    +		return n, err
    +	}
    +	return m.Conn.Read(p)
    +}
    +
    +func (m *MuxConn) getSniffer() io.Reader {
    +	m.sniffer = bufferedReader{source: m.Conn, buffer: &m.buf, bufferSize: m.buf.Len()}
    +	return &m.sniffer
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/doc.go b/src/prometheus/vendor/github.com/cockroachdb/cmux/doc.go
    new file mode 100644
    index 0000000..aaa8f31
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/doc.go
    @@ -0,0 +1,18 @@
    +// Copyright 2016 The CMux Authors. All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +
    +// Package cmux is a library to multiplex network connections based on
    +// their payload. Using cmux, you can serve different protocols from the
    +// same listener.
    +package cmux
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/matchers.go b/src/prometheus/vendor/github.com/cockroachdb/cmux/matchers.go
    new file mode 100644
    index 0000000..9399098
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/matchers.go
    @@ -0,0 +1,164 @@
    +// Copyright 2016 The CMux Authors. All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +
    +package cmux
    +
    +import (
    +	"bufio"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"strings"
    +
    +	"golang.org/x/net/http2"
    +	"golang.org/x/net/http2/hpack"
    +)
    +
    +// Any is a Matcher that matches any connection. It never reads from the
    +// connection and unconditionally returns true.
    +func Any() Matcher {
    +	return func(r io.Reader) bool { return true }
    +}
    +
    +// PrefixMatcher returns a matcher that matches a connection if it
    +// starts with any of the strings in strs. The strings are compiled once
    +// into a patricia tree, so matching reads at most maxDepth bytes.
    +func PrefixMatcher(strs ...string) Matcher {
    +	pt := newPatriciaTreeString(strs...)
    +	return pt.matchPrefix
    +}
    +
    +// defaultHTTPMethods are the request methods matched by HTTP1Fast
    +// (in addition to any caller-supplied extension methods).
    +var defaultHTTPMethods = []string{
    +	"OPTIONS",
    +	"GET",
    +	"HEAD",
    +	"POST",
    +	"PUT",
    +	"DELETE",
    +	"TRACE",
    +	"CONNECT",
    +}
    +
    +// HTTP1Fast only matches the methods in the HTTP request.
    +//
    +// This matcher is very optimistic: if it returns true, it does not mean that
    +// the request is a valid HTTP request. If you want a correct but slower HTTP1
    +// matcher, use HTTP1 instead.
    +func HTTP1Fast(extMethods ...string) Matcher {
    +	return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
    +}
    +
    +// maxHTTPRead bounds how many bytes HTTP1 will sniff from a connection.
    +const maxHTTPRead = 4096
    +
    +// HTTP1 parses the first line or up to 4096 bytes of the request to see if
    +// the connection contains an HTTP request.
    +func HTTP1() Matcher {
    +	return func(r io.Reader) bool {
    +		br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
    +		l, part, err := br.ReadLine()
    +		if err != nil || part {
    +			// Read failed or the request line exceeded the sniff limit.
    +			return false
    +		}
    +
    +		_, _, proto, ok := parseRequestLine(string(l))
    +		if !ok {
    +			return false
    +		}
    +
    +		// Only HTTP/1.x protocols match (major version must be 1).
    +		v, _, ok := http.ParseHTTPVersion(proto)
    +		return ok && v == 1
    +	}
    +}
    +
    +// parseRequestLine splits a request line such as "GET /index.html HTTP/1.0"
    +// into its method, URI, and protocol parts; ok is false if the line does not
    +// contain two separating spaces. Grabbed from net/http.
    +func parseRequestLine(line string) (method, uri, proto string, ok bool) {
    +	s1 := strings.Index(line, " ")
    +	s2 := strings.Index(line[s1+1:], " ")
    +	if s1 < 0 || s2 < 0 {
    +		return
    +	}
    +	s2 += s1 + 1
    +	return line[:s1], line[s1+1 : s2], line[s2+1:], true
    +}
    +
    +// HTTP2 matches connections that begin with the HTTP/2 client connection
    +// preface (see hasHTTP2Preface), detecting an HTTP2 connection.
    +func HTTP2() Matcher {
    +	return hasHTTP2Preface
    +}
    +
    +// HTTP1HeaderField returns a matcher matching the header fields of the first
    +// request of an HTTP 1 connection. It matches when the header named name has
    +// exactly the value value.
    +func HTTP1HeaderField(name, value string) Matcher {
    +	return func(r io.Reader) bool {
    +		return matchHTTP1Field(r, name, value)
    +	}
    +}
    +
    +// HTTP2HeaderField returns a matcher matching the header fields of the first
    +// headers frame of an HTTP 2 connection.
    +func HTTP2HeaderField(name, value string) Matcher {
    +	return func(r io.Reader) bool {
    +		return matchHTTP2Field(r, name, value)
    +	}
    +}
    +
    +// hasHTTP2Preface reports whether r begins with the exact HTTP/2 client
    +// connection preface (http2.ClientPreface). A short read counts as no match.
    +func hasHTTP2Preface(r io.Reader) bool {
    +	var b [len(http2.ClientPreface)]byte
    +	if _, err := io.ReadFull(r, b[:]); err != nil {
    +		return false
    +	}
    +
    +	return string(b[:]) == http2.ClientPreface
    +}
    +
    +// matchHTTP1Field parses an HTTP/1 request from r and reports whether the
    +// header named name has exactly the value value. Unparseable requests do
    +// not match.
    +func matchHTTP1Field(r io.Reader, name, value string) (matched bool) {
    +	req, err := http.ReadRequest(bufio.NewReader(r))
    +	if err != nil {
    +		return false
    +	}
    +
    +	return req.Header.Get(name) == value
    +}
    +
    +// matchHTTP2Field reports whether the first HEADERS frame of an HTTP/2
    +// connection carries a header field name with exactly the value value.
    +// It requires the client preface, then reads frames until the headers
    +// end, a match is found, or an error occurs.
    +func matchHTTP2Field(r io.Reader, name, value string) (matched bool) {
    +	if !hasHTTP2Preface(r) {
    +		return false
    +	}
    +
    +	// Frames written by the framer (e.g. acks) are discarded; only reads matter.
    +	framer := http2.NewFramer(ioutil.Discard, r)
    +	hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
    +		// Invoked for every decoded HPACK field; flag a match on name==value.
    +		if hf.Name == name && hf.Value == value {
    +			matched = true
    +		}
    +	})
    +	for {
    +		f, err := framer.ReadFrame()
    +		if err != nil {
    +			return false
    +		}
    +
    +		switch f := f.(type) {
    +		case *http2.HeadersFrame:
    +			if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
    +				return false
    +			}
    +			if matched {
    +				return true
    +			}
    +
    +			// All header blocks decoded without a match: give up.
    +			if f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 {
    +				return false
    +			}
    +		}
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cmux/patricia.go b/src/prometheus/vendor/github.com/cockroachdb/cmux/patricia.go
    new file mode 100644
    index 0000000..c3e3d85
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cmux/patricia.go
    @@ -0,0 +1,179 @@
    +// Copyright 2016 The CMux Authors. All rights reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +
    +package cmux
    +
    +import (
    +	"bytes"
    +	"io"
    +)
    +
    +// patriciaTree is a simple patricia tree that handles []byte instead of string
    +// and cannot be changed after instantiation.
    +type patriciaTree struct {
    +	root     *ptNode
    +	maxDepth int // max depth of the tree.
    +}
    +
    +// newPatriciaTree builds an immutable patricia tree over the byte strings bs.
    +// maxDepth is the longest input length plus one; match readers sniff exactly
    +// that many bytes.
    +func newPatriciaTree(bs ...[]byte) *patriciaTree {
    +	max := 0
    +	for _, b := range bs {
    +		if max < len(b) {
    +			max = len(b)
    +		}
    +	}
    +	return &patriciaTree{
    +		root:     newNode(bs),
    +		maxDepth: max + 1,
    +	}
    +}
    +
    +// newPatriciaTreeString is a convenience wrapper around newPatriciaTree
    +// that converts each string to a []byte first.
    +func newPatriciaTreeString(strs ...string) *patriciaTree {
    +	b := make([][]byte, len(strs))
    +	for i, s := range strs {
    +		b[i] = []byte(s)
    +	}
    +	return newPatriciaTree(b...)
    +}
    +
    +// matchPrefix reads up to maxDepth bytes from r and reports whether they
    +// start with any string stored in the tree (prefix semantics).
    +func (t *patriciaTree) matchPrefix(r io.Reader) bool {
    +	buf := make([]byte, t.maxDepth)
    +	n, _ := io.ReadFull(r, buf)
    +	return t.root.match(buf[:n], true)
    +}
    +
    +// match reads up to maxDepth bytes from r and reports whether they are
    +// exactly equal to a string stored in the tree (whole-string semantics).
    +func (t *patriciaTree) match(r io.Reader) bool {
    +	buf := make([]byte, t.maxDepth)
    +	n, _ := io.ReadFull(r, buf)
    +	return t.root.match(buf[:n], false)
    +}
    +
    +// ptNode is one node of the patricia tree: a shared prefix, child nodes
    +// keyed by the byte following the prefix, and a terminal flag marking that
    +// a stored string ends here.
    +type ptNode struct {
    +	prefix   []byte
    +	next     map[byte]*ptNode
    +	terminal bool
    +}
    +
    +// newNode recursively builds the subtree for the given set of (suffix)
    +// strings: it factors out their longest common prefix, then groups the
    +// remainders by their first byte into child nodes.
    +func newNode(strs [][]byte) *ptNode {
    +	if len(strs) == 0 {
    +		// No strings: empty terminal node.
    +		return &ptNode{
    +			prefix:   []byte{},
    +			terminal: true,
    +		}
    +	}
    +
    +	if len(strs) == 1 {
    +		// Single string: store it whole as a terminal leaf.
    +		return &ptNode{
    +			prefix:   strs[0],
    +			terminal: true,
    +		}
    +	}
    +
    +	p, strs := splitPrefix(strs)
    +	n := &ptNode{
    +		prefix: p,
    +	}
    +
    +	// Bucket the remainders by their first byte; an empty remainder means
    +	// a stored string ends exactly at this node.
    +	nexts := make(map[byte][][]byte)
    +	for _, s := range strs {
    +		if len(s) == 0 {
    +			n.terminal = true
    +			continue
    +		}
    +		nexts[s[0]] = append(nexts[s[0]], s[1:])
    +	}
    +
    +	n.next = make(map[byte]*ptNode)
    +	for first, rests := range nexts {
    +		n.next[first] = newNode(rests)
    +	}
    +
    +	return n
    +}
    +
    +// splitPrefix returns the longest common prefix of bss and the remainders
    +// of each input with that prefix stripped.
    +func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) {
    +	if len(bss) == 0 || len(bss[0]) == 0 {
    +		return prefix, bss
    +	}
    +
    +	if len(bss) == 1 {
    +		// A single string is its own prefix; remainder is one empty string.
    +		return bss[0], [][]byte{{}}
    +	}
    +
    +	// Extend the prefix one byte at a time while all inputs agree.
    +	for i := 0; ; i++ {
    +		var cur byte
    +		eq := true
    +		for j, b := range bss {
    +			if len(b) <= i {
    +				// One input ended: prefix cannot grow further.
    +				eq = false
    +				break
    +			}
    +
    +			if j == 0 {
    +				// First input supplies the candidate byte for position i.
    +				cur = b[i]
    +				continue
    +			}
    +
    +			if cur != b[i] {
    +				eq = false
    +				break
    +			}
    +		}
    +
    +		if !eq {
    +			break
    +		}
    +
    +		prefix = append(prefix, cur)
    +	}
    +
    +	rest = make([][]byte, 0, len(bss))
    +	for _, b := range bss {
    +		rest = append(rest, b[len(prefix):])
    +	}
    +
    +	return prefix, rest
    +}
    +
    +// match reports whether b matches this subtree. With prefix=true it is
    +// enough that a stored string is a prefix of b; with prefix=false, b must
    +// equal a stored string exactly.
    +func (n *ptNode) match(b []byte, prefix bool) bool {
    +	l := len(n.prefix)
    +	if l > 0 {
    +		// Compare against this node's prefix, truncated to len(b) so a
    +		// short b can still prefix-match.
    +		if l > len(b) {
    +			l = len(b)
    +		}
    +		if !bytes.Equal(b[:l], n.prefix) {
    +			return false
    +		}
    +	}
    +
    +	// A stored string ends here: success if prefix matching, or if b is
    +	// consumed exactly.
    +	if n.terminal && (prefix || len(n.prefix) == len(b)) {
    +		return true
    +	}
    +
    +	if l >= len(b) {
    +		return false
    +	}
    +
    +	// Descend via the byte following the prefix.
    +	nextN, ok := n.next[b[l]]
    +	if !ok {
    +		return false
    +	}
    +
    +	// NOTE(review): after the `l >= len(b)` return above, l < len(b) always
    +	// holds here, so the `l == len(b)` branch looks unreachable — confirm.
    +	if l == len(b) {
    +		b = b[l:l]
    +	} else {
    +		b = b[l+1:]
    +	}
    +	return nextN.match(b, prefix)
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/LICENSE b/src/prometheus/vendor/github.com/cockroachdb/cockroach/LICENSE
    new file mode 100644
    index 0000000..2e8d0f1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/LICENSE
    @@ -0,0 +1,419 @@
    +CockroachDB Community License Agreement
    +
    +  Please read this CockroachDB Community License Agreement (the "Agreement")
    +  carefully before using CockroachDB (as defined below), which is offered by
    +  Cockroach Labs, Inc. or its affiliated Legal Entities ("Cockroach Labs").
    +
    +  By downloading CockroachDB or using it in any manner, You agree that You have
    +  read and agree to be bound by the terms of this Agreement.  If You are
    +  accessing CockroachDB on behalf of a Legal Entity, You represent and warrant
    +  that You have the authority to agree to these terms on its behalf and the
    +  right to bind that Legal Entity to this Agreement.  Use of CockroachDB is
    +  expressly conditioned upon Your assent to all the terms of this Agreement, to
    +  the exclusion of all other terms.
    +
    +  1. Definitions.  In addition to other terms defined elsewhere in this
    +     Agreement, the terms below have the following meanings.
    +
    +    (a) "CockroachDB" shall mean the SQL database software provided by Cockroach
    +        Labs, including both CockroachDB Community and CockroachDB Enterprise
    +        editions, as defined below.
    +
    +    (b) "CockroachDB Community Edition" shall mean the open source version of
    +        CockroachDB, available free of charge at
    +
    +            https://github.com/cockroachdb/cockroach
    +
    +    (c) "CockroachDB Enterprise Edition" shall mean the additional features made
    +        available by Cockroach Labs, the use of which is subject to additional
    +        terms set out below.
    +
    +    (d) "Contribution" shall mean any work of authorship, including the original
    +        version of the Work and any modifications or additions to that Work or
    +        Derivative Works thereof, that is intentionally submitted Cockroach Labs
    +        for inclusion in the Work by the copyright owner or by an individual or
    +        Legal Entity authorized to submit on behalf of the copyright owner.  For
    +        the purposes of this definition, "submitted" means any form of
    +        electronic, verbal, or written communication sent to Cockroach Labs or
    +        its representatives, including but not limited to communication on
    +        electronic mailing lists, source code control systems, and issue
    +        tracking systems that are managed by, or on behalf of, Cockroach Labs
    +        for the purpose of discussing and improving the Work, but excluding
    +        communication that is conspicuously marked or otherwise designated in
    +        writing by the copyright owner as "Not a Contribution."
    +
    +    (e) "Contributor" shall mean any copyright owner or individual or Legal
    +        Entity authorized by the copyright owner, other than Cockroach Labs,
    +        from whom Cockroach Labs receives a Contribution that Cockroach Labs
    +        subsequently incorporates within the Work.
    +
    +    (f) "Derivative Works" shall mean any work, whether in Source or Object
    +        form, that is based on (or derived from) the Work, such as a
    +        translation, abridgement, condensation, or any other recasting,
    +        transformation, or adaptation for which the editorial revisions,
    +        annotations, elaborations, or other modifications represent, as a whole,
    +        an original work of authorship. For the purposes of this License,
    +        Derivative Works shall not include works that remain separable from, or
    +        merely link (or bind by name) to the interfaces of, the Work and
    +        Derivative Works thereof.
    +
    +    (g) "Legal Entity" shall mean the union of the acting entity and all other
    +        entities that control, are controlled by, or are under common control
    +        with that entity.  For the purposes of this definition, "control" means
    +        (i) the power, direct or indirect, to cause the direction or management
    +        of such entity, whether by contract or otherwise, or (ii) ownership of
    +        fifty percent (50%) or more of the outstanding shares, or (iii)
    +        beneficial ownership of such entity.
    +
    +    (h) "License" shall mean the terms and conditions for use, reproduction, and
    +        distribution of a Work as defined by this Agreement.
    +
    +    (i) "Licensor" shall mean Cockroach Labs or a Contributor, as applicable.
    +
    +    (j) "Object" form shall mean any form resulting from mechanical
    +        transformation or translation of a Source form, including but not
    +        limited to compiled object code, generated documentation, and
    +        conversions to other media types.
    +
    +    (k) "Source" form shall mean the preferred form for making modifications,
    +        including but not limited to software source code, documentation source,
    +        and configuration files.
    +
    +    (l) "Third Party Works" shall mean Works, including Contributions, and other
    +        technology owned by a person or Legal Entity other than Cockroach Labs,
    +        as indicated by a copyright notice that is included in or attached to
    +        such Works or technology.
    +
    +    (m) "Work" shall mean the work of authorship, whether in Source or Object
    +        form, made available under a License, as indicated by a copyright notice
    +        that is included in or attached to the work.
    +
    +    (n) "You" (or "Your") shall mean an individual or Legal Entity exercising
    +        permissions granted by this License.
    +
    +  2. Licenses.
    +
    +    (a) License to CockroachDB Community Edition.  The License for CockroachDB
    +        Community Edition is the Apache License, Version 2.0 ("Apache License").
    +        The Apache License includes a grant of patent license, as well as
    +        redistribution rights that are contingent on several requirements.
    +        Please see
    +
    +            http://www.apache.org/licenses/LICENSE-2.0
    +
    +        for full terms.  CockroachDB Community Edition is a no-cost, entry-level
    +        license and as such, contains the following disclaimers: NOTWITHSTANDING
    +        ANYTHING TO THE CONTRARY HEREIN, COCKROACHDB COMMUNITY EDITION IS
    +        PROVIDED "AS IS" AND "AS AVAILABLE", AND ALL EXPRESS OR IMPLIED
    +        WARRANTIES ARE EXCLUDED AND DISCLAIMED, INCLUDING WITHOUT LIMITATION THE
    +        IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
    +        NON-INFRINGEMENT, AND ANY WARRANTIES ARISING BY STATUTE OR OTHERWISE IN
    +        LAW OR FROM COURSE OF DEALING, COURSE OF PERFORMANCE, OR USE IN TRADE.
    +        For clarity, the terms of this Agreement, other than the relevant
    +        definitions in Section 1 and this Section 2(a) do not apply to
    +        CockroachDB Community Edition.
    +
    +    (b) License to CockroachDB Enterprise Edition.
    +
    +      i   Grant of Copyright License: Subject to the terms of this Agreement,
    +          Licensor hereby grants to You a worldwide, non-exclusive,
    +          non-transferable limited license to reproduce, prepare Enterprise
    +          Derivative Works (as defined below) of, publicly display, publicly
    +          perform, sublicense, and distribute CockroachDB Enterprise Edition
    +          for Your business purposes, for so long as You are not in violation
    +          of this Section 2(b) and are current on all payments required by
    +          Section 4 below.
    +
    +      ii  Grant of Patent License: Subject to the terms of this Agreement,
    +          Licensor hereby grants to You a worldwide, non-exclusive,
    +          non-transferable limited patent license to make, have made, use,
    +          offer to sell, sell, import, and otherwise transfer CockroachDB
    +          Enterprise Edition, where such license applies only to those patent
    +          claims licensable by Licensor that are necessarily infringed by
    +          their Contribution(s) alone or by combination of their
    +          Contribution(s) with the Work to which such Contribution(s) was
    +          submitted.  If You institute patent litigation against any entity
    +          (including a cross-claim or counterclaim in a lawsuit) alleging that
    +          the Work or a Contribution incorporated within the Work constitutes
    +          direct or contributory patent infringement, then any patent licenses
    +          granted to You under this License for that Work shall terminate as
    +          of the date such litigation is filed.
    +
    +      iii License to Third Party Works:  From time to time Cockroach Labs may
    +          use, or provide You access to, Third Party Works in connection
    +          CockroachDB Enterprise Edition.  You acknowledge and agree that in
    +          addition to this Agreement, Your use of Third Party Works is subject
    +          to all other terms and conditions set forth in the License provided
    +          with or contained in such Third Party Works.  Some Third Party Works
    +          may be licensed to You solely for use with CockroachDB Enterprise
    +          Edition under the terms of a third party License, or as otherwise
    +          notified by Cockroach Labs, and not under the terms of this
    +          Agreement.  You agree that the owners and third party licensors of
    +          Third Party Works are intended third party beneficiaries to this
    +          Agreement.
    +
    +  3. Support.  From time to time, in its sole discretion, Cockroach Labs may
    +     offer professional services or support for CockroachDB, which may now or in
    +     the future be subject to additional fees.
    +
    +  4. Fees for CockroachDB Enterprise Edition or CockroachDB Support.
    +
    +    (a) Fees.  The License to CockroachDB Enterprise Edition is conditioned upon
    +        Your payment of the fees specified on
    +
    +            https://cockroachlabs.com/pricing
    +
    +        which You agree to pay to Cockroach Labs in accordance with the payment
    +        terms set out on that page.  Any professional services or support for
    +        CockroachDB may also be subject to Your payment of fees, which will be
    +        specified by Cockroach Labs when you sign up to receive such
    +        professional services or support. Cockroach Labs reserves the right to
    +        change the fees at any time with prior written notice; for recurring
    +        fees, any such adjustments will take effect as of the next pay period.
    +
    +    (b) Overdue Payments and Taxes. Overdue payments are subject to a service
    +        charge equal to the lesser of 1.5% per month or the maximum legal
    +        interest rate allowed by law, and You shall pay all Cockroach Labs’
    +        reasonable costs of collection, including court costs and attorneys’
    +        fees.  Fees are stated and payable in U.S. dollars and are exclusive of
    +        all sales, use, value added and similar taxes, duties, withholdings and
    +        other governmental assessments (but excluding taxes based on Cockroach
    +        Labs’ income) that may be levied on the transactions contemplated by
    +        this Agreement in any jurisdiction, all of which are Your responsibility
    +        unless you have provided Cockroach Labs with a valid tax-exempt
    +        certificate.
    +
    +    (c) Record-keeping and Audit.  If fees for CockroachDB Enterprise Edition
    +        are based on the number of cores or servers running on CockroachDB
    +        Enterprise Edition or another use-based unit of measurement, You must
    +        maintain complete and accurate records with respect to Your use of
    +        CockroachDB Enterprise Edition and will provide such records to
    +        Cockroach Labs for inspection or audit upon Cockroach Labs’ reasonable
    +        request.  If an inspection or audit uncovers additional usage by You for
    +        which fees are owed under this Agreement, then You shall pay for such
    +        additional usage at Cockroach Labs’ then-current rates.
    +
    +  5. Trial License.  If You have signed up for a trial or evaluation of
    +     CockroachDB Enterprise Edition, Your License to CockroachDB Enterprise
    +     Edition is granted without charge for the trial or evaluation period
    +     specified when You signed up, or if no term was specified, for thirty (30)
    +     calendar days, provided that Your License is granted solely for purposes of
    +     Your internal evaluation of CockroachDB Enterprise Edition during the trial
    +     or evaluation period (a "Trial License").  You may not use CockroachDB
    +     Enterprise Edition under a Trial License more than once in any twelve (12)
    +     month period.  Cockroach Labs may revoke a Trial License at any time and
    +     for any reason.  Sections 3, 4, 9 and 11 of this Agreement do not apply to
    +     Trial Licenses.
    +
    +  6. Redistribution.  You may reproduce and distribute copies of the Work or
    +     Derivative Works thereof in any medium, with or without modifications, and
    +     in Source or Object form, provided that You meet the following conditions:
    +
    +    (a) You must give any other recipients of the Work or Derivative Works a
    +        copy of this License; and
    +
    +    (b) You must cause any modified files to carry prominent notices stating
    +        that You changed the files; and
    +
    +    (c) You must retain, in the Source form of any Derivative Works that You
    +        distribute, all copyright, patent, trademark, and attribution notices
    +        from the Source form of the Work, excluding those notices that do not
    +        pertain to any part of the Derivative Works; and
    +
    +    (d) If the Work includes a "NOTICE" text file as part of its distribution,
    +        then any Derivative Works that You distribute must include a readable
    +        copy of the attribution notices contained within such NOTICE file,
    +        excluding those notices that do not pertain to any part of the
    +        Derivative Works, in at least one of the following places: within a
    +        NOTICE text file distributed as part of the Derivative Works; within the
    +        Source form or documentation, if provided along with the Derivative
    +        Works; or, within a display generated by the Derivative Works, if and
    +        wherever such third-party notices normally appear.  The contents of the
    +        NOTICE file are for informational purposes only and do not modify the
    +        License.  You may add Your own attribution notices within Derivative
    +        Works that You distribute, alongside or as an addendum to the NOTICE
    +        text from the Work, provided that such additional attribution notices
    +        cannot be construed as modifying the License.
    +
    +        You may add Your own copyright statement to Your modifications and may
    +        provide additional or different license terms and conditions for use,
    +        reproduction, or distribution of Your modifications, or for any such
    +        Derivative Works as a whole, provided Your use, reproduction, and
    +        distribution of the Work otherwise complies with the conditions stated
    +        in this License.
    +
    +    (e) Enterprise Derivative Works: Derivative Works of CockroachDB Enterprise
    +        Edition ("Enterprise Derivative Works") may be made, reproduced and
    +        distributed in any medium, with or without modifications, in Source or
    +        Object form, provided that each Enterprise Derivative Work will be
    +        considered to include a License to CockroachDB Enterprise Edition and
    +        thus will be subject to the payment of fees to Cockroach Labs by any
    +        user of the Enterprise Derivative Work.
    +
    +  7. Submission of Contributions. Unless You explicitly state otherwise, any
    +     Contribution intentionally submitted for inclusion in CockroachDB by You to
    +     Cockroach Labs shall be under the terms and conditions of
    +
    +         https://cla-assistant.io/cockroachdb/cockroach
    +
    +     (which is based off of the Apache License), without any additional terms or
    +     conditions, payments of royalties or otherwise to Your benefit.
    +     Notwithstanding the above, nothing herein shall supersede or modify the
    +     terms of any separate license agreement You may have executed with
    +     Cockroach Labs regarding such Contributions.
    +
    +  8. Trademarks.  This License does not grant permission to use the trade names,
    +     trademarks, service marks, or product names of Licensor, except as required
    +     for reasonable and customary use in describing the origin of the Work and
    +     reproducing the content of the NOTICE file.
    +
    +  9. Limited Warranty.
    +
    +    (a) Warranties.  Cockroach Labs warrants to You that: (i) CockroachDB
    +        Enterprise Edition will materially perform in accordance with the
    +        applicable documentation for ninety (90) days after initial delivery to
    +        You; and (ii) any professional services performed by Cockroach Labs
    +        under this Agreement will be performed in a workmanlike manner, in
    +        accordance with general industry standards.
    +
    +    (b) Exclusions.  Cockroach Labs’ warranties in this Section 9 do not extend
    +        to problems that result from: (i) Your failure to implement updates
    +        issued by Cockroach Labs during the warranty period; (ii) any
    +        alterations or additions (including Enterprise Derivative Works and
    +        Contributions) to CockroachDB not performed by or at the direction of
    +        Cockroach Labs; (iii) failures that are not reproducible by Cockroach
    +        Labs; (iv) operation of CockroachDB Enterprise Edition in violation of
    +        this Agreement or not in accordance with its documentation; (v) failures
    +        caused by software, hardware or products not licensed or provided by
    +        Cockroach Labs hereunder; or (vi) Third Party Works.
    +
    +    (c) Remedies.  In the event of a breach of a warranty under this Section 9,
    +        Cockroach Labs will, at its discretion and cost, either repair, replace
    +        or re-perform the applicable Works or services or refund a portion of
    +        fees previously paid to Cockroach Labs that are associated with the
    +        defective Works or services. This is Your exclusive remedy, and
    +        Cockroach Labs’ sole liability, arising in connection with the limited
    +        warranties herein.
    +
    +  10. Disclaimer of Warranty.  Except as set out in Section 9, unless required
    +      by applicable law, Licensor provides the Work (and each Contributor
    +      provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR
    +      CONDITIONS OF ANY KIND, either express or implied, arising out of course
    +      of dealing, course of performance, or usage in trade, including, without
    +      limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT,
    +      MERCHANTABILITY, CORRECTNESS, RELIABILITY, or FITNESS FOR A PARTICULAR
    +      PURPOSE, all of which are hereby disclaimed.  You are solely responsible
    +      for determining the appropriateness of using or redistributing Works and
    +      assume any risks associated with Your exercise of permissions under the
    +      applicable License for such Works.
    +
    +  11. Limited Indemnity.
    +
    +    (a) Indemnity.  Cockroach Labs will defend, indemnify and hold You harmless
    +        against any third party claims, liabilities or expenses incurred
    +        (including reasonable attorneys’ fees), as well as amounts finally
    +        awarded in a settlement or a non-appealable judgement by a court
    +        ("Losses"), to the extent arising from any claim or allegation by a
    +        third party that CockroachDB Enterprise Edition infringes or
    +        misappropriates a valid United States patent, copyright or trade secret
    +        right of a third party; provided that You give Cockroach Labs: (i)
    +        prompt written notice of any such claim or allegation; (ii) sole control
    +        of the defense and settlement thereof; and (iii) reasonable cooperation
    +        and assistance in such defense or settlement.  If any Work within
    +        CockroachDB Enterprise Edition becomes or, in Cockroach Labs’ opinion,
    +        is likely to become, the subject of an injunction, Cockroach Labs may,
    +        at its option, (A) procure for You the right to continue using such
    +        Work, (B) replace or modify such Work so that it becomes non-infringing
    +        without substantially compromising its functionality, or, if (A) and (B)
    +        are not commercially practicable, then (C) terminate Your license to the
    +        allegedly infringing Work and refund to You a prorated portion of the
    +        prepaid and unearned fees for such infringing Work.  The foregoing
    +        states the entire liability of Cockroach Labs with respect to
    +        infringement of patents, copyrights, trade secrets or other intellectual
    +        property rights.
    +
    +    (b) Exclusions.  The foregoing obligations shall not apply to: (i) Works
    +        modified by any party other than Cockroach Labs (including Enterprise
    +        Derivative Works and Contributions), if the alleged infringement relates
    +        to such modification, (ii) Works combined or bundled with any products,
    +        processes or materials not provided by Cockroach Labs where the alleged
    +        infringement relates to such combination, (iii) use of a version of
    +        CockroachDB Enterprise Edition other than the version that was current
    +        at the time of such use, as long as a non-infringing version had been
    +        released, (iv) any Works created to Your specifications, (v)
    +        infringement or misappropriation of any proprietary right in which You
    +        have an interest, or (vi) Third Party Works.  You will defend, indemnify
    +        and hold Cockroach Labs harmless against any Losses arising from any
    +        such claim or allegation, subject to conditions reciprocal to those in
    +        Section 11(a).
    +
    +  12. Limitation of Liability.  In no event and under no legal or equitable
    +      theory, whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts), and notwithstanding anything in this Agreement to the
    +      contrary, shall Licensor or any Contributor be liable to You for (i) any
    +      amounts in excess, in the aggregate, of the fees paid by You to Cockroach
    +      Labs under this Agreement in the twelve (12) months preceding the date the
    +      first cause of liability arose), or (ii) any indirect, special,
    +      incidental, punitive, exemplary, reliance, or consequential damages of any
    +      character arising as a result of this Agreement or out of the use or
    +      inability to use the Work (including but not limited to damages for loss
    +      of goodwill, profits, data or data use, work stoppage, computer failure or
    +      malfunction, cost of procurement of substitute goods, technology or
    +      services, or any and all other commercial damages or losses), even if such
    +      Licensor or Contributor has been advised of the possibility of such
    +      damages. THESE LIMITATIONS SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE
    +      ESSENTIAL PURPOSE OF ANY LIMITED REMEDY.
    +
    +  13. Accepting Warranty or Additional Liability.  While redistributing Works or
    +      Derivative Works thereof, and without limiting your obligations under
    +      Section 6, You may choose to offer, and charge a fee for, acceptance of
    +      support, warranty, indemnity, or other liability obligations and/or rights
    +      consistent with this License.  However, in accepting such obligations, You
    +      may act only on Your own behalf and on Your sole responsibility, not on
    +      behalf of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold Cockroach Labs and each other Contributor harmless for
    +      any liability incurred by, or claims asserted against, such Contributor by
    +      reason of your accepting any such warranty or additional liability.
    +
    +  14. General.
    +
    +    (a) Relationship of Parties.  You and Cockroach Labs are independent
    +        contractors, and nothing herein shall be deemed to constitute either
    +        party as the agent or representative of the other or both parties as
    +        joint venturers or partners for any purpose.
    +
    +    (b) Export Control.  You shall comply with the U.S. Foreign Corrupt
    +        Practices Act and all applicable export laws, restrictions and
    +        regulations of the U.S. Department of Commerce, and any other applicable
    +        U.S. and foreign authority.
    +
    +    (c) Assignment.  This Agreement and the rights and obligations herein may
    +        not be assigned or transferred, in whole or in part, by You without the
    +        prior written consent of Cockroach Labs.  Any assignment in violation of
    +        this provision is void.  This Agreement shall be binding upon, and inure
    +        to the benefit of, the successors and permitted assigns of the parties.
    +
    +    (d) Governing Law.  This Agreement shall be governed by and construed under
    +        the laws of the State of New York and the United States without regard
    +        to conflicts of laws provisions thereof, and without regard to the
    +        Uniform Computer Information Transactions Act.
    +
    +    (e) Attorneys’ Fees.  In any action or proceeding to enforce rights under
    +        this Agreement, the prevailing party shall be entitled to recover its
    +        costs, expenses and attorneys’ fees.
    +
    +    (f) Severability.  If any provision of this Agreement is held to be invalid,
    +        illegal or unenforceable in any respect, that provision shall be limited
    +        or eliminated to the minimum extent necessary so that this Agreement
    +        otherwise remains in full force and effect and enforceable.
    +
    +    (g) Entire Agreement; Waivers; Modification.  This Agreement constitutes the
    +        entire agreement between the parties relating to the subject matter
    +        hereof and supersedes all proposals, understandings, or discussions,
    +        whether written or oral, relating to the subject matter of this
    +        Agreement and all past dealing or industry custom. The failure of either
    +        party to enforce its rights under this Agreement at any time for any
    +        period shall not be construed as a waiver of such rights. No changes,
    +        modifications or waivers to this Agreement will be effective unless in
    +        writing and signed by both parties.
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/httputil/http.go b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/httputil/http.go
    new file mode 100644
    index 0000000..8f9878a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/httputil/http.go
    @@ -0,0 +1,95 @@
    +// Copyright 2014 The Cockroach Authors.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +//
    +// Author: Spencer Kimball (spencer.kimball@gmail.com)
    +
    +package httputil
    +
    +import (
    +	"bytes"
    +	"io/ioutil"
    +	"net/http"
    +	"strconv"
    +
    +	"github.com/gogo/protobuf/jsonpb"
    +	"github.com/gogo/protobuf/proto"
    +	"github.com/pkg/errors"
    +)
    +
    +const (
    +	// AcceptHeader is the canonical header name for accept.
    +	AcceptHeader = "Accept"
    +	// AcceptEncodingHeader is the canonical header name for accept encoding.
    +	AcceptEncodingHeader = "Accept-Encoding"
    +	// ContentEncodingHeader is the canonical header name for content type.
    +	ContentEncodingHeader = "Content-Encoding"
    +	// ContentTypeHeader is the canonical header name for content type.
    +	ContentTypeHeader = "Content-Type"
    +	// JSONContentType is the JSON content type.
    +	JSONContentType = "application/json"
    +	// AltJSONContentType is the alternate JSON content type.
    +	AltJSONContentType = "application/x-json"
    +	// ProtoContentType is the protobuf content type.
    +	ProtoContentType = "application/x-protobuf"
    +	// AltProtoContentType is the alternate protobuf content type.
    +	AltProtoContentType = "application/x-google-protobuf"
    +	// PlaintextContentType is the plaintext content type.
    +	PlaintextContentType = "text/plain"
    +	// GzipEncoding is the gzip encoding.
    +	GzipEncoding = "gzip"
    +)
    +
    +// GetJSON uses the supplied client to GET the URL specified by the parameters
    +// and unmarshals the result into response.
    +func GetJSON(httpClient http.Client, path string, response proto.Message) error {
    +	req, err := http.NewRequest("GET", path, nil)
    +	if err != nil {
    +		return err
    +	}
    +	return doJSONRequest(httpClient, req, response)
    +}
    +
    +// PostJSON uses the supplied client to POST request to the URL specified by
    +// the parameters and unmarshals the result into response.
    +func PostJSON(httpClient http.Client, path string, request, response proto.Message) error {
    +	// Hack to avoid upsetting TestProtoMarshal().
    +	marshalFn := (&jsonpb.Marshaler{}).Marshal
    +
    +	var buf bytes.Buffer
    +	if err := marshalFn(&buf, request); err != nil {
    +		return err
    +	}
    +	req, err := http.NewRequest("POST", path, &buf)
    +	if err != nil {
    +		return err
    +	}
    +	return doJSONRequest(httpClient, req, response)
    +}
    +
    +func doJSONRequest(httpClient http.Client, req *http.Request, response proto.Message) error {
    +	if timeout := httpClient.Timeout; timeout > 0 {
    +		req.Header.Set("Grpc-Timeout", strconv.FormatInt(timeout.Nanoseconds(), 10)+"n")
    +	}
    +	req.Header.Set(AcceptHeader, JSONContentType)
    +	resp, err := httpClient.Do(req)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	if contentType := resp.Header.Get(ContentTypeHeader); !(resp.StatusCode == http.StatusOK && contentType == JSONContentType) {
    +		b, err := ioutil.ReadAll(resp.Body)
    +		return errors.Errorf("status: %s, content-type: %s, body: %s, error: %v", resp.Status, contentType, b, err)
    +	}
    +	return jsonpb.Unmarshal(resp.Body, response)
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/clone.go b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/clone.go
    new file mode 100644
    index 0000000..0fd4cca
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/clone.go
    @@ -0,0 +1,117 @@
    +// Copyright 2016 The Cockroach Authors.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +//
    +// Author: Tamir Duberstein (tamird@gmail.com)
    +
    +package protoutil
    +
    +import (
    +	"fmt"
    +	"reflect"
    +
    +	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
    +	"github.com/gogo/protobuf/proto"
    +)
    +
    +var verbotenKinds = [...]reflect.Kind{
    +	reflect.Array,
    +}
    +
    +type typeKey struct {
    +	typ      reflect.Type
    +	verboten reflect.Kind
    +}
    +
    +var types struct {
    +	syncutil.Mutex
    +	known map[typeKey]reflect.Type
    +}
    +
    +func init() {
    +	types.known = make(map[typeKey]reflect.Type)
    +}
    +
    +// Clone uses proto.Clone to return a deep copy of pb. It panics if pb
    +// recursively contains any instances of types which are known to be
    +// unsupported by proto.Clone.
    +//
    +// This function and its associated lint (see build/style_test.go) exist to
    +// ensure we do not attempt to proto.Clone types which are not supported by
    +// proto.Clone. This hackery is necessary because proto.Clone gives no direct
    +// indication that it has incompletely cloned a type; it merely logs to standard
    +// output (see
    +// https://github.com/golang/protobuf/blob/89238a3/proto/clone.go#L204).
    +//
    +// The concrete case against which this is currently guarding may be resolved
    +// upstream, see https://github.com/gogo/protobuf/issues/147.
    +func Clone(pb proto.Message) proto.Message {
    +	for _, verbotenKind := range verbotenKinds {
    +		if t := typeIsOrContainsVerboten(reflect.TypeOf(pb), verbotenKind); t != nil {
    +			panic(fmt.Sprintf("attempt to clone %T, which contains uncloneable field of type %s", pb, t))
    +		}
    +	}
    +
    +	return proto.Clone(pb)
    +}
    +
    +func typeIsOrContainsVerboten(t reflect.Type, verboten reflect.Kind) reflect.Type {
    +	types.Lock()
    +	defer types.Unlock()
    +
    +	return typeIsOrContainsVerbotenLocked(t, verboten)
    +}
    +
    +func typeIsOrContainsVerbotenLocked(t reflect.Type, verboten reflect.Kind) reflect.Type {
    +	key := typeKey{t, verboten}
    +	knownTypeIsOrContainsVerboten, ok := types.known[key]
    +	if !ok {
    +		knownTypeIsOrContainsVerboten = typeIsOrContainsVerbotenImpl(t, verboten)
    +		types.known[key] = knownTypeIsOrContainsVerboten
    +	}
    +	return knownTypeIsOrContainsVerboten
    +}
    +
    +func typeIsOrContainsVerbotenImpl(t reflect.Type, verboten reflect.Kind) reflect.Type {
    +	switch t.Kind() {
    +	case verboten:
    +		return t
    +
    +	case reflect.Map:
    +		if key := typeIsOrContainsVerbotenLocked(t.Key(), verboten); key != nil {
    +			return key
    +		}
    +		if value := typeIsOrContainsVerbotenLocked(t.Elem(), verboten); value != nil {
    +			return value
    +		}
    +
    +	case reflect.Array, reflect.Ptr, reflect.Slice:
    +		if value := typeIsOrContainsVerbotenLocked(t.Elem(), verboten); value != nil {
    +			return value
    +		}
    +
    +	case reflect.Struct:
    +		for i := 0; i < t.NumField(); i++ {
    +			if field := typeIsOrContainsVerbotenLocked(t.Field(i).Type, verboten); field != nil {
    +				return field
    +			}
    +		}
    +
    +	case reflect.Chan, reflect.Func:
    +		// Not strictly correct, but cloning these kinds is not allowed.
    +		return t
    +
    +	}
    +
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/jsonpb_marshal.go b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/jsonpb_marshal.go
    new file mode 100644
    index 0000000..d3be893
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/jsonpb_marshal.go
    @@ -0,0 +1,128 @@
    +// Copyright 2016 The Cockroach Authors.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +//
    +// Author: Tamir Duberstein (tamird@gmail.com)
    +
    +package protoutil
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"fmt"
    +	"io"
    +	"reflect"
    +
    +	"github.com/gogo/protobuf/jsonpb"
    +	"github.com/gogo/protobuf/proto"
    +	gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
    +	"github.com/pkg/errors"
    +
    +	"github.com/cockroachdb/cockroach/pkg/util/httputil"
    +)
    +
    +var _ gwruntime.Marshaler = (*JSONPb)(nil)
    +
    +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
    +
    +// JSONPb is a gwruntime.Marshaler that uses github.com/gogo/protobuf/jsonpb.
    +type JSONPb jsonpb.Marshaler
    +
    +// ContentType implements gwruntime.Marshaler.
    +func (*JSONPb) ContentType() string {
    +	return httputil.JSONContentType
    +}
    +
    +// Marshal implements gwruntime.Marshaler.
    +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
    +	return j.marshal(v)
    +}
    +
    +// a lower-case version of marshal to allow for a call from
    +// marshalNonProtoField without upsetting TestProtoMarshal().
    +func (j *JSONPb) marshal(v interface{}) ([]byte, error) {
    +	if pb, ok := v.(proto.Message); ok {
    +		var buf bytes.Buffer
    +		marshalFn := (*jsonpb.Marshaler)(j).Marshal
    +		if err := marshalFn(&buf, pb); err != nil {
    +			return nil, err
    +		}
    +		return buf.Bytes(), nil
    +	}
    +	return j.marshalNonProtoField(v)
    +}
    +
    +// Cribbed verbatim from grpc-gateway.
    +type protoEnum interface {
    +	fmt.Stringer
    +	EnumDescriptor() ([]byte, []int)
    +}
    +
    +// Cribbed verbatim from grpc-gateway.
    +func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
    +	rv := reflect.ValueOf(v)
    +	for rv.Kind() == reflect.Ptr {
    +		if rv.IsNil() {
    +			return []byte("null"), nil
    +		}
    +		rv = rv.Elem()
    +	}
    +
    +	if rv.Kind() == reflect.Map {
    +		m := make(map[string]*json.RawMessage)
    +		for _, k := range rv.MapKeys() {
    +			buf, err := j.marshal(rv.MapIndex(k).Interface())
    +			if err != nil {
    +				return nil, err
    +			}
    +			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
    +		}
    +		if j.Indent != "" {
    +			return json.MarshalIndent(m, "", j.Indent)
    +		}
    +		return json.Marshal(m)
    +	}
    +	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
    +		return json.Marshal(enum.String())
    +	}
    +	return json.Marshal(rv.Interface())
    +}
    +
    +// Unmarshal implements gwruntime.Marshaler.
    +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
    +	if pb, ok := v.(proto.Message); ok {
    +		return jsonpb.Unmarshal(bytes.NewReader(data), pb)
    +	}
    +	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
    +}
    +
    +// NewDecoder implements gwruntime.Marshaler.
    +func (j *JSONPb) NewDecoder(r io.Reader) gwruntime.Decoder {
    +	return gwruntime.DecoderFunc(func(v interface{}) error {
    +		if pb, ok := v.(proto.Message); ok {
    +			return jsonpb.Unmarshal(r, pb)
    +		}
    +		return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
    +	})
    +}
    +
    +// NewEncoder implements gwruntime.Marshaler.
    +func (j *JSONPb) NewEncoder(w io.Writer) gwruntime.Encoder {
    +	return gwruntime.EncoderFunc(func(v interface{}) error {
    +		if pb, ok := v.(proto.Message); ok {
    +			marshalFn := (*jsonpb.Marshaler)(j).Marshal
    +			return marshalFn(w, pb)
    +		}
    +		return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
    +	})
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshal.go b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshal.go
    new file mode 100644
    index 0000000..79a6af8
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshal.go
    @@ -0,0 +1,31 @@
    +// Copyright 2016 The Cockroach Authors.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +//
    +// Author: Tamir Duberstein (tamird@gmail.com)
    +
    +package protoutil
    +
    +import "github.com/gogo/protobuf/proto"
    +
    +// Interceptor will be called with every proto before it is marshalled.
    +// Interceptor is not safe to modify concurrently with calls to Marshal.
    +var Interceptor = func(_ proto.Message) {}
    +
    +// Marshal uses proto.Marshal to encode pb into the wire format. It is used in
    +// some tests to intercept calls to proto.Marshal.
    +func Marshal(pb proto.Message) ([]byte, error) {
    +	Interceptor(pb)
    +
    +	return proto.Marshal(pb)
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshaler.go b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshaler.go
    new file mode 100644
    index 0000000..eade985
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshaler.go
    @@ -0,0 +1,96 @@
    +// Copyright 2016 The Cockroach Authors.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +//
    +// Author: Tamir Duberstein (tamird@gmail.com)
    +
    +package protoutil
    +
    +import (
    +	"io"
    +	"io/ioutil"
    +
    +	"github.com/gogo/protobuf/proto"
    +	gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
    +	"github.com/pkg/errors"
    +
    +	"github.com/cockroachdb/cockroach/pkg/util/httputil"
    +)
    +
    +var _ gwruntime.Marshaler = (*ProtoPb)(nil)
    +
    +// ProtoPb is a gwruntime.Marshaler that uses github.com/gogo/protobuf/proto.
    +type ProtoPb struct{}
    +
    +// ContentType implements gwruntime.Marshaler.
    +func (*ProtoPb) ContentType() string {
    +	return httputil.ProtoContentType
    +}
    +
    +// Marshal implements gwruntime.Marshaler.
    +func (*ProtoPb) Marshal(v interface{}) ([]byte, error) {
    +	if p, ok := v.(proto.Message); ok {
    +		return Marshal(p)
    +	}
    +	return nil, errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
    +}
    +
    +// Unmarshal implements gwruntime.Marshaler.
    +func (*ProtoPb) Unmarshal(data []byte, v interface{}) error {
    +	if p, ok := v.(proto.Message); ok {
    +		return proto.Unmarshal(data, p)
    +	}
    +	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
    +}
    +
    +type protoDecoder struct {
    +	r io.Reader
    +}
    +
    +// NewDecoder implements gwruntime.Marshaler.
    +func (*ProtoPb) NewDecoder(r io.Reader) gwruntime.Decoder {
    +	return &protoDecoder{r: r}
    +}
    +
    +// Decode implements gwruntime.Marshaler.
    +func (d *protoDecoder) Decode(v interface{}) error {
    +	if p, ok := v.(proto.Message); ok {
    +		bytes, err := ioutil.ReadAll(d.r)
    +		if err == nil {
    +			err = proto.Unmarshal(bytes, p)
    +		}
    +		return err
    +	}
    +	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
    +}
    +
    +type protoEncoder struct {
    +	w io.Writer
    +}
    +
    +// NewEncoder implements gwruntime.Marshaler.
    +func (*ProtoPb) NewEncoder(w io.Writer) gwruntime.Encoder {
    +	return &protoEncoder{w: w}
    +}
    +
    +// Encode implements gwruntime.Marshaler.
    +func (e *protoEncoder) Encode(v interface{}) error {
    +	if p, ok := v.(proto.Message); ok {
    +		bytes, err := Marshal(p)
    +		if err == nil {
    +			_, err = e.w.Write(bytes)
    +		}
    +		return err
    +	}
    +	return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_deadlock.go b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_deadlock.go
    new file mode 100644
    index 0000000..fdf201b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_deadlock.go
    @@ -0,0 +1,47 @@
    +// Copyright 2016 The Cockroach Authors.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +//
    +// Author: Tamir Duberstein (tamird@gmail.com)
    +
    +// +build deadlock
    +
    +package syncutil
    +
    +import (
    +	"time"
    +
    +	deadlock "github.com/sasha-s/go-deadlock"
    +)
    +
    +func init() {
    +	deadlock.Opts.DeadlockTimeout = 5 * time.Minute
    +}
    +
    +// A Mutex is a mutual exclusion lock.
    +type Mutex struct {
    +	deadlock.Mutex
    +}
    +
    +// AssertHeld is a no-op for deadlock mutexes.
    +func (m *Mutex) AssertHeld() {
    +}
    +
    +// An RWMutex is a reader/writer mutual exclusion lock.
    +type RWMutex struct {
    +	deadlock.RWMutex
    +}
    +
    +// AssertHeld is a no-op for deadlock mutexes.
    +func (m *RWMutex) AssertHeld() {
    +}
    diff --git a/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_sync.go b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_sync.go
    new file mode 100644
    index 0000000..3249c80
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_sync.go
    @@ -0,0 +1,92 @@
    +// Copyright 2016 The Cockroach Authors.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +// implied. See the License for the specific language governing
    +// permissions and limitations under the License.
    +//
    +// Author: Tamir Duberstein (tamird@gmail.com)
    +
    +// +build !deadlock
    +
    +package syncutil
    +
    +import (
    +	"sync"
    +	"sync/atomic"
    +)
    +
    +// A Mutex is a mutual exclusion lock.
    +type Mutex struct {
    +	mu       sync.Mutex
    +	isLocked int32 // updated atomically
    +}
    +
    +// Lock implements sync.Locker.
    +func (m *Mutex) Lock() {
    +	m.mu.Lock()
    +	atomic.StoreInt32(&m.isLocked, 1)
    +}
    +
    +// Unlock implements sync.Locker.
    +func (m *Mutex) Unlock() {
    +	atomic.StoreInt32(&m.isLocked, 0)
    +	m.mu.Unlock()
    +}
    +
    +// AssertHeld may panic if the mutex is not locked (but it is not required to
    +// do so). Functions which require that their callers hold a particular lock
    +// may use this to enforce this requirement more directly than relying on the
    +// race detector.
    +//
    +// Note that we do not require the lock to be held by any particular thread,
    +// just that some thread holds the lock. This is both more efficient and allows
    +// for rare cases where a mutex is locked in one thread and used in another.
    +func (m *Mutex) AssertHeld() {
    +	if atomic.LoadInt32(&m.isLocked) == 0 {
    +		panic("mutex is not locked")
    +	}
    +}
    +
    +// TODO(pmattis): Mutex.AssertHeld is neither used or tested. Silence unused
    +// warning.
    +var _ = (*Mutex).AssertHeld
    +
    +// An RWMutex is a reader/writer mutual exclusion lock.
    +type RWMutex struct {
    +	sync.RWMutex
    +	isLocked int32 // updated atomically
    +}
    +
    +// Lock implements sync.Locker.
    +func (m *RWMutex) Lock() {
    +	m.RWMutex.Lock()
    +	atomic.StoreInt32(&m.isLocked, 1)
    +}
    +
    +// Unlock implements sync.Locker.
    +func (m *RWMutex) Unlock() {
    +	atomic.StoreInt32(&m.isLocked, 0)
    +	m.RWMutex.Unlock()
    +}
    +
    +// AssertHeld may panic if the mutex is not locked for writing (but it is not
    +// required to do so). Functions which require that their callers hold a
    +// particular lock may use this to enforce this requirement more directly than
    +// relying on the race detector.
    +//
    +// Note that we do not require the lock to be held by any particular thread,
    +// just that some thread holds the lock. This is both more efficient and allows
    +// for rare cases where a mutex is locked in one thread and used in another.
    +func (m *RWMutex) AssertHeld() {
    +	if atomic.LoadInt32(&m.isLocked) == 0 {
    +		panic("mutex is not locked")
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/LICENSE b/src/prometheus/vendor/github.com/davecgh/go-spew/LICENSE
    new file mode 100644
    index 0000000..2a7cfd2
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/LICENSE
    @@ -0,0 +1,13 @@
    +Copyright (c) 2012-2013 Dave Collins 
    +
    +Permission to use, copy, modify, and distribute this software for any
    +purpose with or without fee is hereby granted, provided that the above
    +copyright notice and this permission notice appear in all copies.
    +
    +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/spew/bypass.go b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/bypass.go
    new file mode 100644
    index 0000000..565bf58
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/bypass.go
    @@ -0,0 +1,151 @@
    +// Copyright (c) 2015 Dave Collins 
    +//
    +// Permission to use, copy, modify, and distribute this software for any
    +// purpose with or without fee is hereby granted, provided that the above
    +// copyright notice and this permission notice appear in all copies.
    +//
    +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    +
    +// NOTE: Due to the following build constraints, this file will only be compiled
    +// when the code is not running on Google App Engine and "-tags disableunsafe"
    +// is not added to the go build command line.
    +// +build !appengine,!disableunsafe
    +
    +package spew
    +
    +import (
    +	"reflect"
    +	"unsafe"
    +)
    +
    +const (
    +	// UnsafeDisabled is a build-time constant which specifies whether or
    +	// not access to the unsafe package is available.
    +	UnsafeDisabled = false
    +
    +	// ptrSize is the size of a pointer on the current arch.
    +	ptrSize = unsafe.Sizeof((*byte)(nil))
    +)
    +
    +var (
    +	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
    +	// internal reflect.Value fields.  These values are valid before golang
    +	// commit ecccf07e7f9d which changed the format.  The are also valid
    +	// after commit 82f48826c6c7 which changed the format again to mirror
    +	// the original format.  Code in the init function updates these offsets
    +	// as necessary.
    +	offsetPtr    = uintptr(ptrSize)
    +	offsetScalar = uintptr(0)
    +	offsetFlag   = uintptr(ptrSize * 2)
    +
    +	// flagKindWidth and flagKindShift indicate various bits that the
    +	// reflect package uses internally to track kind information.
    +	//
    +	// flagRO indicates whether or not the value field of a reflect.Value is
    +	// read-only.
    +	//
    +	// flagIndir indicates whether the value field of a reflect.Value is
    +	// the actual data or a pointer to the data.
    +	//
    +	// These values are valid before golang commit 90a7c3c86944 which
    +	// changed their positions.  Code in the init function updates these
    +	// flags as necessary.
    +	flagKindWidth = uintptr(5)
    +	flagKindShift = uintptr(flagKindWidth - 1)
    +	flagRO        = uintptr(1 << 0)
    +	flagIndir     = uintptr(1 << 1)
    +)
    +
    +func init() {
    +	// Older versions of reflect.Value stored small integers directly in the
    +	// ptr field (which is named val in the older versions).  Versions
    +	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
    +	// scalar for this purpose which unfortunately came before the flag
    +	// field, so the offset of the flag field is different for those
    +	// versions.
    +	//
    +	// This code constructs a new reflect.Value from a known small integer
    +	// and checks if the size of the reflect.Value struct indicates it has
    +	// the scalar field. When it does, the offsets are updated accordingly.
    +	vv := reflect.ValueOf(0xf00)
    +	if unsafe.Sizeof(vv) == (ptrSize * 4) {
    +		offsetScalar = ptrSize * 2
    +		offsetFlag = ptrSize * 3
    +	}
    +
    +	// Commit 90a7c3c86944 changed the flag positions such that the low
    +	// order bits are the kind.  This code extracts the kind from the flags
    +	// field and ensures it's the correct type.  When it's not, the flag
    +	// order has been changed to the newer format, so the flags are updated
    +	// accordingly.
    +	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
    +	upfv := *(*uintptr)(upf)
    +	flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) {
    +		flagKindShift = 0
    +		flagRO = 1 << 5
    +		flagIndir = 1 << 6
    +
    +		// Commit adf9b30e5594 modified the flags to separate the
    +		// flagRO flag into two bits which specifies whether or not the
    +		// field is embedded.  This causes flagIndir to move over a bit
    +		// and means that flagRO is the combination of either of the
    +		// original flagRO bit and the new bit.
    +		//
    +		// This code detects the change by extracting what used to be
    +		// the indirect bit to ensure it's set.  When it's not, the flag
    +		// order has been changed to the newer format, so the flags are
    +		// updated accordingly.
    +		if upfv&flagIndir == 0 {
    +			flagRO = 3 << 5
    +			flagIndir = 1 << 7
    +		}
    +	}
    +}
    +
    +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
    +// the typical safety restrictions preventing access to unaddressable and
    +// unexported data.  It works by digging the raw pointer to the underlying
    +// value out of the protected value and generating a new unprotected (unsafe)
    +// reflect.Value to it.
    +//
    +// This allows us to check for implementations of the Stringer and error
    +// interfaces to be used for pretty printing ordinarily unaddressable and
    +// inaccessible values such as unexported struct fields.
    +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
    +	indirects := 1
    +	vt := v.Type()
    +	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
    +	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
    +	if rvf&flagIndir != 0 {
    +		vt = reflect.PtrTo(v.Type())
    +		indirects++
    +	} else if offsetScalar != 0 {
    +		// The value is in the scalar field when it's not one of the
    +		// reference types.
    +		switch vt.Kind() {
    +		case reflect.Uintptr:
    +		case reflect.Chan:
    +		case reflect.Func:
    +		case reflect.Map:
    +		case reflect.Ptr:
    +		case reflect.UnsafePointer:
    +		default:
    +			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
    +				offsetScalar)
    +		}
    +	}
    +
    +	pv := reflect.NewAt(vt, upv)
    +	rv = pv
    +	for i := 0; i < indirects; i++ {
    +		rv = rv.Elem()
    +	}
    +	return rv
    +}
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/spew/common.go b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/common.go
    new file mode 100644
    index 0000000..14f02dc
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/common.go
    @@ -0,0 +1,341 @@
    +/*
    + * Copyright (c) 2013 Dave Collins 
    + *
    + * Permission to use, copy, modify, and distribute this software for any
    + * purpose with or without fee is hereby granted, provided that the above
    + * copyright notice and this permission notice appear in all copies.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    + */
    +
    +package spew
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"reflect"
    +	"sort"
    +	"strconv"
    +)
    +
    +// Some constants in the form of bytes to avoid string overhead.  This mirrors
    +// the technique used in the fmt package.
    +var (
    +	panicBytes            = []byte("(PANIC=")
    +	plusBytes             = []byte("+")
    +	iBytes                = []byte("i")
    +	trueBytes             = []byte("true")
    +	falseBytes            = []byte("false")
    +	interfaceBytes        = []byte("(interface {})")
    +	commaNewlineBytes     = []byte(",\n")
    +	newlineBytes          = []byte("\n")
    +	openBraceBytes        = []byte("{")
    +	openBraceNewlineBytes = []byte("{\n")
    +	closeBraceBytes       = []byte("}")
    +	asteriskBytes         = []byte("*")
    +	colonBytes            = []byte(":")
    +	colonSpaceBytes       = []byte(": ")
    +	openParenBytes        = []byte("(")
    +	closeParenBytes       = []byte(")")
    +	spaceBytes            = []byte(" ")
    +	pointerChainBytes     = []byte("->")
    +	nilAngleBytes         = []byte("")
    +	maxNewlineBytes       = []byte("\n")
    +	maxShortBytes         = []byte("")
    +	circularBytes         = []byte("")
    +	circularShortBytes    = []byte("")
    +	invalidAngleBytes     = []byte("")
    +	openBracketBytes      = []byte("[")
    +	closeBracketBytes     = []byte("]")
    +	percentBytes          = []byte("%")
    +	precisionBytes        = []byte(".")
    +	openAngleBytes        = []byte("<")
    +	closeAngleBytes       = []byte(">")
    +	openMapBytes          = []byte("map[")
    +	closeMapBytes         = []byte("]")
    +	lenEqualsBytes        = []byte("len=")
    +	capEqualsBytes        = []byte("cap=")
    +)
    +
    +// hexDigits is used to map a decimal value to a hex digit.
    +var hexDigits = "0123456789abcdef"
    +
    +// catchPanic handles any panics that might occur during the handleMethods
    +// calls.
    +func catchPanic(w io.Writer, v reflect.Value) {
    +	if err := recover(); err != nil {
    +		w.Write(panicBytes)
    +		fmt.Fprintf(w, "%v", err)
    +		w.Write(closeParenBytes)
    +	}
    +}
    +
    +// handleMethods attempts to call the Error and String methods on the underlying
    +// type the passed reflect.Value represents and outputes the result to Writer w.
    +//
    +// It handles panics in any called methods by catching and displaying the error
    +// as the formatted value.
    +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
    +	// We need an interface to check if the type implements the error or
    +	// Stringer interface.  However, the reflect package won't give us an
    +	// interface on certain things like unexported struct fields in order
    +	// to enforce visibility rules.  We use unsafe, when it's available,
    +	// to bypass these restrictions since this package does not mutate the
    +	// values.
    +	if !v.CanInterface() {
    +		if UnsafeDisabled {
    +			return false
    +		}
    +
    +		v = unsafeReflectValue(v)
    +	}
    +
    +	// Choose whether or not to do error and Stringer interface lookups against
    +	// the base type or a pointer to the base type depending on settings.
    +	// Technically calling one of these methods with a pointer receiver can
    +	// mutate the value, however, types which choose to satisify an error or
    +	// Stringer interface with a pointer receiver should not be mutating their
    +	// state inside these interface methods.
    +	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
    +		v = unsafeReflectValue(v)
    +	}
    +	if v.CanAddr() {
    +		v = v.Addr()
    +	}
    +
    +	// Is it an error or Stringer?
    +	switch iface := v.Interface().(type) {
    +	case error:
    +		defer catchPanic(w, v)
    +		if cs.ContinueOnMethod {
    +			w.Write(openParenBytes)
    +			w.Write([]byte(iface.Error()))
    +			w.Write(closeParenBytes)
    +			w.Write(spaceBytes)
    +			return false
    +		}
    +
    +		w.Write([]byte(iface.Error()))
    +		return true
    +
    +	case fmt.Stringer:
    +		defer catchPanic(w, v)
    +		if cs.ContinueOnMethod {
    +			w.Write(openParenBytes)
    +			w.Write([]byte(iface.String()))
    +			w.Write(closeParenBytes)
    +			w.Write(spaceBytes)
    +			return false
    +		}
    +		w.Write([]byte(iface.String()))
    +		return true
    +	}
    +	return false
    +}
    +
    +// printBool outputs a boolean value as true or false to Writer w.
    +func printBool(w io.Writer, val bool) {
    +	if val {
    +		w.Write(trueBytes)
    +	} else {
    +		w.Write(falseBytes)
    +	}
    +}
    +
    +// printInt outputs a signed integer value to Writer w.
    +func printInt(w io.Writer, val int64, base int) {
    +	w.Write([]byte(strconv.FormatInt(val, base)))
    +}
    +
    +// printUint outputs an unsigned integer value to Writer w.
    +func printUint(w io.Writer, val uint64, base int) {
    +	w.Write([]byte(strconv.FormatUint(val, base)))
    +}
    +
    +// printFloat outputs a floating point value using the specified precision,
    +// which is expected to be 32 or 64bit, to Writer w.
    +func printFloat(w io.Writer, val float64, precision int) {
    +	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
    +}
    +
    +// printComplex outputs a complex value using the specified float precision
    +// for the real and imaginary parts to Writer w.
    +func printComplex(w io.Writer, c complex128, floatPrecision int) {
    +	r := real(c)
    +	w.Write(openParenBytes)
    +	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
    +	i := imag(c)
    +	if i >= 0 {
    +		w.Write(plusBytes)
    +	}
    +	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
    +	w.Write(iBytes)
    +	w.Write(closeParenBytes)
    +}
    +
    +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
    +// prefix to Writer w.
    +func printHexPtr(w io.Writer, p uintptr) {
    +	// Null pointer.
    +	num := uint64(p)
    +	if num == 0 {
    +		w.Write(nilAngleBytes)
    +		return
    +	}
    +
    +	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
    +	buf := make([]byte, 18)
    +
    +	// It's simpler to construct the hex string right to left.
    +	base := uint64(16)
    +	i := len(buf) - 1
    +	for num >= base {
    +		buf[i] = hexDigits[num%base]
    +		num /= base
    +		i--
    +	}
    +	buf[i] = hexDigits[num]
    +
    +	// Add '0x' prefix.
    +	i--
    +	buf[i] = 'x'
    +	i--
    +	buf[i] = '0'
    +
    +	// Strip unused leading bytes.
    +	buf = buf[i:]
    +	w.Write(buf)
    +}
    +
    +// valuesSorter implements sort.Interface to allow a slice of reflect.Value
    +// elements to be sorted.
    +type valuesSorter struct {
    +	values  []reflect.Value
    +	strings []string // either nil or same len and values
    +	cs      *ConfigState
    +}
    +
    +// newValuesSorter initializes a valuesSorter instance, which holds a set of
    +// surrogate keys on which the data should be sorted.  It uses flags in
    +// ConfigState to decide if and how to populate those surrogate keys.
    +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
    +	vs := &valuesSorter{values: values, cs: cs}
    +	if canSortSimply(vs.values[0].Kind()) {
    +		return vs
    +	}
    +	if !cs.DisableMethods {
    +		vs.strings = make([]string, len(values))
    +		for i := range vs.values {
    +			b := bytes.Buffer{}
    +			if !handleMethods(cs, &b, vs.values[i]) {
    +				vs.strings = nil
    +				break
    +			}
    +			vs.strings[i] = b.String()
    +		}
    +	}
    +	if vs.strings == nil && cs.SpewKeys {
    +		vs.strings = make([]string, len(values))
    +		for i := range vs.values {
    +			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
    +		}
    +	}
    +	return vs
    +}
    +
    +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
    +// directly, or whether it should be considered for sorting by surrogate keys
    +// (if the ConfigState allows it).
    +func canSortSimply(kind reflect.Kind) bool {
    +	// This switch parallels valueSortLess, except for the default case.
    +	switch kind {
    +	case reflect.Bool:
    +		return true
    +	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
    +		return true
    +	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
    +		return true
    +	case reflect.Float32, reflect.Float64:
    +		return true
    +	case reflect.String:
    +		return true
    +	case reflect.Uintptr:
    +		return true
    +	case reflect.Array:
    +		return true
    +	}
    +	return false
    +}
    +
    +// Len returns the number of values in the slice.  It is part of the
    +// sort.Interface implementation.
    +func (s *valuesSorter) Len() int {
    +	return len(s.values)
    +}
    +
    +// Swap swaps the values at the passed indices.  It is part of the
    +// sort.Interface implementation.
    +func (s *valuesSorter) Swap(i, j int) {
    +	s.values[i], s.values[j] = s.values[j], s.values[i]
    +	if s.strings != nil {
    +		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
    +	}
    +}
    +
    +// valueSortLess returns whether the first value should sort before the second
    +// value.  It is used by valueSorter.Less as part of the sort.Interface
    +// implementation.
    +func valueSortLess(a, b reflect.Value) bool {
    +	switch a.Kind() {
    +	case reflect.Bool:
    +		return !a.Bool() && b.Bool()
    +	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
    +		return a.Int() < b.Int()
    +	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
    +		return a.Uint() < b.Uint()
    +	case reflect.Float32, reflect.Float64:
    +		return a.Float() < b.Float()
    +	case reflect.String:
    +		return a.String() < b.String()
    +	case reflect.Uintptr:
    +		return a.Uint() < b.Uint()
    +	case reflect.Array:
    +		// Compare the contents of both arrays.
    +		l := a.Len()
    +		for i := 0; i < l; i++ {
    +			av := a.Index(i)
    +			bv := b.Index(i)
    +			if av.Interface() == bv.Interface() {
    +				continue
    +			}
    +			return valueSortLess(av, bv)
    +		}
    +	}
    +	return a.String() < b.String()
    +}
    +
    +// Less returns whether the value at index i should sort before the
    +// value at index j.  It is part of the sort.Interface implementation.
    +func (s *valuesSorter) Less(i, j int) bool {
    +	if s.strings == nil {
    +		return valueSortLess(s.values[i], s.values[j])
    +	}
    +	return s.strings[i] < s.strings[j]
    +}
    +
    +// sortValues is a sort function that handles both native types and any type that
    +// can be converted to error or Stringer.  Other inputs are sorted according to
    +// their Value.String() value to ensure display stability.
    +func sortValues(values []reflect.Value, cs *ConfigState) {
    +	if len(values) == 0 {
    +		return
    +	}
    +	sort.Sort(newValuesSorter(values, cs))
    +}
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/spew/config.go b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/config.go
    new file mode 100644
    index 0000000..ee1ab07
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/config.go
    @@ -0,0 +1,297 @@
    +/*
    + * Copyright (c) 2013 Dave Collins 
    + *
    + * Permission to use, copy, modify, and distribute this software for any
    + * purpose with or without fee is hereby granted, provided that the above
    + * copyright notice and this permission notice appear in all copies.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    + */
    +
    +package spew
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"os"
    +)
    +
    +// ConfigState houses the configuration options used by spew to format and
    +// display values.  There is a global instance, Config, that is used to control
    +// all top-level Formatter and Dump functionality.  Each ConfigState instance
    +// provides methods equivalent to the top-level functions.
    +//
    +// The zero value for ConfigState provides no indentation.  You would typically
    +// want to set it to a space or a tab.
    +//
    +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
    +// with default settings.  See the documentation of NewDefaultConfig for default
    +// values.
    +type ConfigState struct {
    +	// Indent specifies the string to use for each indentation level.  The
    +	// global config instance that all top-level functions use set this to a
    +	// single space by default.  If you would like more indentation, you might
    +	// set this to a tab with "\t" or perhaps two spaces with "  ".
    +	Indent string
    +
    +	// MaxDepth controls the maximum number of levels to descend into nested
    +	// data structures.  The default, 0, means there is no limit.
    +	//
    +	// NOTE: Circular data structures are properly detected, so it is not
    +	// necessary to set this value unless you specifically want to limit deeply
    +	// nested data structures.
    +	MaxDepth int
    +
    +	// DisableMethods specifies whether or not error and Stringer interfaces are
    +	// invoked for types that implement them.
    +	DisableMethods bool
    +
    +	// DisablePointerMethods specifies whether or not to check for and invoke
    +	// error and Stringer interfaces on types which only accept a pointer
    +	// receiver when the current type is not a pointer.
    +	//
    +	// NOTE: This might be an unsafe action since calling one of these methods
    +	// with a pointer receiver could technically mutate the value, however,
    +	// in practice, types which choose to satisify an error or Stringer
    +	// interface with a pointer receiver should not be mutating their state
    +	// inside these interface methods.  As a result, this option relies on
    +	// access to the unsafe package, so it will not have any effect when
    +	// running in environments without access to the unsafe package such as
    +	// Google App Engine or with the "disableunsafe" build tag specified.
    +	DisablePointerMethods bool
    +
    +	// ContinueOnMethod specifies whether or not recursion should continue once
    +	// a custom error or Stringer interface is invoked.  The default, false,
    +	// means it will print the results of invoking the custom error or Stringer
    +	// interface and return immediately instead of continuing to recurse into
    +	// the internals of the data type.
    +	//
    +	// NOTE: This flag does not have any effect if method invocation is disabled
    +	// via the DisableMethods or DisablePointerMethods options.
    +	ContinueOnMethod bool
    +
    +	// SortKeys specifies map keys should be sorted before being printed. Use
    +	// this to have a more deterministic, diffable output.  Note that only
    +	// native types (bool, int, uint, floats, uintptr and string) and types
    +	// that support the error or Stringer interfaces (if methods are
    +	// enabled) are supported, with other types sorted according to the
    +	// reflect.Value.String() output which guarantees display stability.
    +	SortKeys bool
    +
    +	// SpewKeys specifies that, as a last resort attempt, map keys should
    +	// be spewed to strings and sorted by those strings.  This is only
    +	// considered if SortKeys is true.
    +	SpewKeys bool
    +}
    +
    +// Config is the active configuration of the top-level functions.
    +// The configuration can be changed by modifying the contents of spew.Config.
    +var Config = ConfigState{Indent: " "}
    +
    +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the formatted string as a value that satisfies error.  See NewFormatter
    +// for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
    +	return fmt.Errorf(format, c.convertArgs(a)...)
    +}
    +
    +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
    +	return fmt.Fprint(w, c.convertArgs(a)...)
    +}
    +
    +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
    +	return fmt.Fprintf(w, format, c.convertArgs(a)...)
    +}
    +
    +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
    +// passed with a Formatter interface returned by c.NewFormatter.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
    +	return fmt.Fprintln(w, c.convertArgs(a)...)
    +}
    +
    +// Print is a wrapper for fmt.Print that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
    +	return fmt.Print(c.convertArgs(a)...)
    +}
    +
    +// Printf is a wrapper for fmt.Printf that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
    +	return fmt.Printf(format, c.convertArgs(a)...)
    +}
    +
    +// Println is a wrapper for fmt.Println that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
    +	return fmt.Println(c.convertArgs(a)...)
    +}
    +
    +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the resulting string.  See NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Sprint(a ...interface{}) string {
    +	return fmt.Sprint(c.convertArgs(a)...)
    +}
    +
    +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
    +// passed with a Formatter interface returned by c.NewFormatter.  It returns
    +// the resulting string.  See NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
    +	return fmt.Sprintf(format, c.convertArgs(a)...)
    +}
    +
    +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
    +// were passed with a Formatter interface returned by c.NewFormatter.  It
    +// returns the resulting string.  See NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
    +func (c *ConfigState) Sprintln(a ...interface{}) string {
    +	return fmt.Sprintln(c.convertArgs(a)...)
    +}
    +
    +/*
    +NewFormatter returns a custom formatter that satisfies the fmt.Formatter
    +interface.  As a result, it integrates cleanly with standard fmt package
    +printing functions.  The formatter is useful for inline printing of smaller data
    +types similar to the standard %v format specifier.
    +
    +The custom formatter only responds to the %v (most compact), %+v (adds pointer
    +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
    +combinations.  Any other verbs such as %x and %q will be sent to the the
    +standard fmt package for formatting.  In addition, the custom formatter ignores
    +the width and precision arguments (however they will still work on the format
    +specifiers not handled by the custom formatter).
    +
    +Typically this function shouldn't be called directly.  It is much easier to make
    +use of the custom formatter by calling one of the convenience functions such as
    +c.Printf, c.Println, or c.Printf.
    +*/
    +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
    +	return newFormatter(c, v)
    +}
    +
    +// Fdump formats and displays the passed arguments to io.Writer w.  It formats
    +// exactly the same as Dump.
    +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
    +	fdump(c, w, a...)
    +}
    +
    +/*
    +Dump displays the passed parameters to standard out with newlines, customizable
    +indentation, and additional debug information such as complete types and all
    +pointer addresses used to indirect to the final value.  It provides the
    +following features over the built-in printing facilities provided by the fmt
    +package:
    +
    +	* Pointers are dereferenced and followed
    +	* Circular data structures are detected and handled properly
    +	* Custom Stringer/error interfaces are optionally invoked, including
    +	  on unexported types
    +	* Custom types which only implement the Stringer/error interfaces via
    +	  a pointer receiver are optionally invoked when passing non-pointer
    +	  variables
    +	* Byte arrays and slices are dumped like the hexdump -C command which
    +	  includes offsets, byte values in hex, and ASCII output
    +
    +The configuration options are controlled by modifying the public members
    +of c.  See ConfigState for options documentation.
    +
    +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
    +get the formatted result as a string.
    +*/
    +func (c *ConfigState) Dump(a ...interface{}) {
    +	fdump(c, os.Stdout, a...)
    +}
    +
    +// Sdump returns a string with the passed arguments formatted exactly the same
    +// as Dump.
    +func (c *ConfigState) Sdump(a ...interface{}) string {
    +	var buf bytes.Buffer
    +	fdump(c, &buf, a...)
    +	return buf.String()
    +}
    +
    +// convertArgs accepts a slice of arguments and returns a slice of the same
    +// length with each argument converted to a spew Formatter interface using
    +// the ConfigState associated with s.
    +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
    +	formatters = make([]interface{}, len(args))
    +	for index, arg := range args {
    +		formatters[index] = newFormatter(c, arg)
    +	}
    +	return formatters
    +}
    +
    +// NewDefaultConfig returns a ConfigState with the following default settings.
    +//
    +// 	Indent: " "
    +// 	MaxDepth: 0
    +// 	DisableMethods: false
    +// 	DisablePointerMethods: false
    +// 	ContinueOnMethod: false
    +// 	SortKeys: false
    +func NewDefaultConfig() *ConfigState {
    +	return &ConfigState{Indent: " "}
    +}
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/spew/doc.go b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/doc.go
    new file mode 100644
    index 0000000..5be0c40
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/doc.go
    @@ -0,0 +1,202 @@
    +/*
    + * Copyright (c) 2013 Dave Collins 
    + *
    + * Permission to use, copy, modify, and distribute this software for any
    + * purpose with or without fee is hereby granted, provided that the above
    + * copyright notice and this permission notice appear in all copies.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    + */
    +
    +/*
    +Package spew implements a deep pretty printer for Go data structures to aid in
    +debugging.
    +
    +A quick overview of the additional features spew provides over the built-in
    +printing facilities for Go data types are as follows:
    +
    +	* Pointers are dereferenced and followed
    +	* Circular data structures are detected and handled properly
    +	* Custom Stringer/error interfaces are optionally invoked, including
    +	  on unexported types
    +	* Custom types which only implement the Stringer/error interfaces via
    +	  a pointer receiver are optionally invoked when passing non-pointer
    +	  variables
    +	* Byte arrays and slices are dumped like the hexdump -C command which
    +	  includes offsets, byte values in hex, and ASCII output (only when using
    +	  Dump style)
    +
    +There are two different approaches spew allows for dumping Go data structures:
    +
    +	* Dump style which prints with newlines, customizable indentation,
    +	  and additional debug information such as types and all pointer addresses
    +	  used to indirect to the final value
    +	* A custom Formatter interface that integrates cleanly with the standard fmt
    +	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
    +	  similar to the default %v while providing the additional functionality
    +	  outlined above and passing unsupported format verbs such as %x and %q
    +	  along to fmt
    +
    +Quick Start
    +
    +This section demonstrates how to quickly get started with spew.  See the
    +sections below for further details on formatting and configuration options.
    +
    +To dump a variable with full newlines, indentation, type, and pointer
    +information use Dump, Fdump, or Sdump:
    +	spew.Dump(myVar1, myVar2, ...)
    +	spew.Fdump(someWriter, myVar1, myVar2, ...)
    +	str := spew.Sdump(myVar1, myVar2, ...)
    +
    +Alternatively, if you would prefer to use format strings with a compacted inline
    +printing style, use the convenience wrappers Printf, Fprintf, etc with
    +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
    +%#+v (adds types and pointer addresses):
    +	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
    +	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
    +	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
    +	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
    +
    +Configuration Options
    +
    +Configuration of spew is handled by fields in the ConfigState type.  For
    +convenience, all of the top-level functions use a global state available
    +via the spew.Config global.
    +
    +It is also possible to create a ConfigState instance that provides methods
    +equivalent to the top-level functions.  This allows concurrent configuration
    +options.  See the ConfigState documentation for more details.
    +
    +The following configuration options are available:
    +	* Indent
    +		String to use for each indentation level for Dump functions.
    +		It is a single space by default.  A popular alternative is "\t".
    +
    +	* MaxDepth
    +		Maximum number of levels to descend into nested data structures.
    +		There is no limit by default.
    +
    +	* DisableMethods
    +		Disables invocation of error and Stringer interface methods.
    +		Method invocation is enabled by default.
    +
    +	* DisablePointerMethods
    +		Disables invocation of error and Stringer interface methods on types
    +		which only accept pointer receivers from non-pointer variables.
    +		Pointer method invocation is enabled by default.
    +
    +	* ContinueOnMethod
    +		Enables recursion into types after invoking error and Stringer interface
    +		methods. Recursion after method invocation is disabled by default.
    +
    +	* SortKeys
    +		Specifies map keys should be sorted before being printed. Use
    +		this to have a more deterministic, diffable output.  Note that
    +		only native types (bool, int, uint, floats, uintptr and string)
    +		and types which implement error or Stringer interfaces are
    +		supported with other types sorted according to the
    +		reflect.Value.String() output which guarantees display
    +		stability.  Natural map order is used by default.
    +
    +	* SpewKeys
    +		Specifies that, as a last resort attempt, map keys should be
    +		spewed to strings and sorted by those strings.  This is only
    +		considered if SortKeys is true.
    +
    +Dump Usage
    +
    +Simply call spew.Dump with a list of variables you want to dump:
    +
    +	spew.Dump(myVar1, myVar2, ...)
    +
    +You may also call spew.Fdump if you would prefer to output to an arbitrary
    +io.Writer.  For example, to dump to standard error:
    +
    +	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
    +
    +A third option is to call spew.Sdump to get the formatted output as a string:
    +
    +	str := spew.Sdump(myVar1, myVar2, ...)
    +
    +Sample Dump Output
    +
    +See the Dump example for details on the setup of the types and variables being
    +shown here.
    +
    +	(main.Foo) {
    +	 unexportedField: (*main.Bar)(0xf84002e210)({
    +	  flag: (main.Flag) flagTwo,
    +	  data: (uintptr) 
    +	 }),
    +	 ExportedField: (map[interface {}]interface {}) (len=1) {
    +	  (string) (len=3) "one": (bool) true
    +	 }
    +	}
    +
    +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
    +command as shown.
    +	([]uint8) (len=32 cap=32) {
    +	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
    +	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
    +	 00000020  31 32                                             |12|
    +	}
    +
    +Custom Formatter
    +
    +Spew provides a custom formatter that implements the fmt.Formatter interface
    +so that it integrates cleanly with standard fmt package printing functions. The
    +formatter is useful for inline printing of smaller data types similar to the
    +standard %v format specifier.
    +
    +The custom formatter only responds to the %v (most compact), %+v (adds pointer
    +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
    +combinations.  Any other verbs such as %x and %q will be sent to the the
    +standard fmt package for formatting.  In addition, the custom formatter ignores
    +the width and precision arguments (however they will still work on the format
    +specifiers not handled by the custom formatter).
    +
    +Custom Formatter Usage
    +
    +The simplest way to make use of the spew custom formatter is to call one of the
    +convenience functions such as spew.Printf, spew.Println, or spew.Printf.  The
    +functions have syntax you are most likely already familiar with:
    +
    +	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
    +	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
    +	spew.Println(myVar, myVar2)
    +	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
    +	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
    +
    +See the Index for the full list convenience functions.
    +
    +Sample Formatter Output
    +
    +Double pointer to a uint8:
    +	  %v: <**>5
    +	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
    +	 %#v: (**uint8)5
    +	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
    +
    +Pointer to circular struct with a uint8 field and a pointer to itself:
    +	  %v: <*>{1 <*>}
    +	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
    +	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
    +	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
    +
    +See the Printf example for details on the setup of variables being shown
    +here.
    +
    +Errors
    +
    +Since it is possible for custom Stringer/error interfaces to panic, spew
    +detects them and handles them internally by printing the panic information
    +inline with the output.  Since spew is intended to provide deep pretty printing
    +capabilities on structures, it intentionally does not return any errors.
    +*/
    +package spew
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/spew/dump.go b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/dump.go
    new file mode 100644
    index 0000000..a0ff95e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/dump.go
    @@ -0,0 +1,509 @@
    +/*
    + * Copyright (c) 2013 Dave Collins 
    + *
    + * Permission to use, copy, modify, and distribute this software for any
    + * purpose with or without fee is hereby granted, provided that the above
    + * copyright notice and this permission notice appear in all copies.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    + */
    +
    +package spew
    +
    +import (
    +	"bytes"
    +	"encoding/hex"
    +	"fmt"
    +	"io"
    +	"os"
    +	"reflect"
    +	"regexp"
    +	"strconv"
    +	"strings"
    +)
    +
    +var (
    +	// uint8Type is a reflect.Type representing a uint8.  It is used to
    +	// convert cgo types to uint8 slices for hexdumping.
    +	uint8Type = reflect.TypeOf(uint8(0))
    +
    +	// cCharRE is a regular expression that matches a cgo char.
    +	// It is used to detect character arrays to hexdump them.
    +	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
    +
    +	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
    +	// char.  It is used to detect unsigned character arrays to hexdump
    +	// them.
    +	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
    +
    +	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
    +	// It is used to detect uint8_t arrays to hexdump them.
    +	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
    +)
    +
    +// dumpState contains information about the state of a dump operation.
    +type dumpState struct {
    +	w                io.Writer
    +	depth            int
    +	pointers         map[uintptr]int
    +	ignoreNextType   bool
    +	ignoreNextIndent bool
    +	cs               *ConfigState
    +}
    +
    +// indent performs indentation according to the depth level and cs.Indent
    +// option.
    +func (d *dumpState) indent() {
    +	if d.ignoreNextIndent {
    +		d.ignoreNextIndent = false
    +		return
    +	}
    +	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
    +}
    +
    +// unpackValue returns values inside of non-nil interfaces when possible.
    +// This is useful for data types like structs, arrays, slices, and maps which
    +// can contain varying types packed inside an interface.
    +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
    +	if v.Kind() == reflect.Interface && !v.IsNil() {
    +		v = v.Elem()
    +	}
    +	return v
    +}
    +
    +// dumpPtr handles formatting of pointers by indirecting them as necessary.
    +func (d *dumpState) dumpPtr(v reflect.Value) {
    +	// Remove pointers at or below the current depth from map used to detect
    +	// circular refs.
    +	for k, depth := range d.pointers {
    +		if depth >= d.depth {
    +			delete(d.pointers, k)
    +		}
    +	}
    +
    +	// Keep list of all dereferenced pointers to show later.
    +	pointerChain := make([]uintptr, 0)
    +
    +	// Figure out how many levels of indirection there are by dereferencing
    +	// pointers and unpacking interfaces down the chain while detecting circular
    +	// references.
    +	nilFound := false
    +	cycleFound := false
    +	indirects := 0
    +	ve := v
    +	for ve.Kind() == reflect.Ptr {
    +		if ve.IsNil() {
    +			nilFound = true
    +			break
    +		}
    +		indirects++
    +		addr := ve.Pointer()
    +		pointerChain = append(pointerChain, addr)
    +		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
    +			cycleFound = true
    +			indirects--
    +			break
    +		}
    +		d.pointers[addr] = d.depth
    +
    +		ve = ve.Elem()
    +		if ve.Kind() == reflect.Interface {
    +			if ve.IsNil() {
    +				nilFound = true
    +				break
    +			}
    +			ve = ve.Elem()
    +		}
    +	}
    +
    +	// Display type information.
    +	d.w.Write(openParenBytes)
    +	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
    +	d.w.Write([]byte(ve.Type().String()))
    +	d.w.Write(closeParenBytes)
    +
    +	// Display pointer information.
    +	if len(pointerChain) > 0 {
    +		d.w.Write(openParenBytes)
    +		for i, addr := range pointerChain {
    +			if i > 0 {
    +				d.w.Write(pointerChainBytes)
    +			}
    +			printHexPtr(d.w, addr)
    +		}
    +		d.w.Write(closeParenBytes)
    +	}
    +
    +	// Display dereferenced value.
    +	d.w.Write(openParenBytes)
    +	switch {
    +	case nilFound == true:
    +		d.w.Write(nilAngleBytes)
    +
    +	case cycleFound == true:
    +		d.w.Write(circularBytes)
    +
    +	default:
    +		d.ignoreNextType = true
    +		d.dump(ve)
    +	}
    +	d.w.Write(closeParenBytes)
    +}
    +
    +// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
    +// reflection) arrays and slices are dumped in hexdump -C fashion.
    +func (d *dumpState) dumpSlice(v reflect.Value) {
    +	// Determine whether this type should be hex dumped or not.  Also,
    +	// for types which should be hexdumped, try to use the underlying data
    +	// first, then fall back to trying to convert them to a uint8 slice.
    +	var buf []uint8
    +	doConvert := false
    +	doHexDump := false
    +	numEntries := v.Len()
    +	if numEntries > 0 {
    +		vt := v.Index(0).Type()
    +		vts := vt.String()
    +		switch {
    +		// C types that need to be converted.
    +		case cCharRE.MatchString(vts):
    +			fallthrough
    +		case cUnsignedCharRE.MatchString(vts):
    +			fallthrough
    +		case cUint8tCharRE.MatchString(vts):
    +			doConvert = true
    +
    +		// Try to use existing uint8 slices and fall back to converting
    +		// and copying if that fails.
    +		case vt.Kind() == reflect.Uint8:
    +			// We need an addressable interface to convert the type
    +			// to a byte slice.  However, the reflect package won't
    +			// give us an interface on certain things like
    +			// unexported struct fields in order to enforce
    +			// visibility rules.  We use unsafe, when available, to
    +			// bypass these restrictions since this package does not
    +			// mutate the values.
    +			vs := v
    +			if !vs.CanInterface() || !vs.CanAddr() {
    +				vs = unsafeReflectValue(vs)
    +			}
    +			if !UnsafeDisabled {
    +				vs = vs.Slice(0, numEntries)
    +
    +				// Use the existing uint8 slice if it can be
    +				// type asserted.
    +				iface := vs.Interface()
    +				if slice, ok := iface.([]uint8); ok {
    +					buf = slice
    +					doHexDump = true
    +					break
    +				}
    +			}
    +
    +			// The underlying data needs to be converted if it can't
    +			// be type asserted to a uint8 slice.
    +			doConvert = true
    +		}
    +
    +		// Copy and convert the underlying type if needed.
    +		if doConvert && vt.ConvertibleTo(uint8Type) {
    +			// Convert and copy each element into a uint8 byte
    +			// slice.
    +			buf = make([]uint8, numEntries)
    +			for i := 0; i < numEntries; i++ {
    +				vv := v.Index(i)
    +				buf[i] = uint8(vv.Convert(uint8Type).Uint())
    +			}
    +			doHexDump = true
    +		}
    +	}
    +
    +	// Hexdump the entire slice as needed.
    +	if doHexDump {
    +		indent := strings.Repeat(d.cs.Indent, d.depth)
    +		str := indent + hex.Dump(buf)
    +		str = strings.Replace(str, "\n", "\n"+indent, -1)
    +		str = strings.TrimRight(str, d.cs.Indent)
    +		d.w.Write([]byte(str))
    +		return
    +	}
    +
    +	// Recursively call dump for each item.
    +	for i := 0; i < numEntries; i++ {
    +		d.dump(d.unpackValue(v.Index(i)))
    +		if i < (numEntries - 1) {
    +			d.w.Write(commaNewlineBytes)
    +		} else {
    +			d.w.Write(newlineBytes)
    +		}
    +	}
    +}
    +
    +// dump is the main workhorse for dumping a value.  It uses the passed reflect
    +// value to figure out what kind of object we are dealing with and formats it
    +// appropriately.  It is a recursive function, however circular data structures
    +// are detected and handled properly.
    +func (d *dumpState) dump(v reflect.Value) {
    +	// Handle invalid reflect values immediately.
    +	kind := v.Kind()
    +	if kind == reflect.Invalid {
    +		d.w.Write(invalidAngleBytes)
    +		return
    +	}
    +
    +	// Handle pointers specially.
    +	if kind == reflect.Ptr {
    +		d.indent()
    +		d.dumpPtr(v)
    +		return
    +	}
    +
    +	// Print type information unless already handled elsewhere.
    +	if !d.ignoreNextType {
    +		d.indent()
    +		d.w.Write(openParenBytes)
    +		d.w.Write([]byte(v.Type().String()))
    +		d.w.Write(closeParenBytes)
    +		d.w.Write(spaceBytes)
    +	}
    +	d.ignoreNextType = false
    +
    +	// Display length and capacity if the built-in len and cap functions
    +	// work with the value's kind and the len/cap itself is non-zero.
    +	valueLen, valueCap := 0, 0
    +	switch v.Kind() {
    +	case reflect.Array, reflect.Slice, reflect.Chan:
    +		valueLen, valueCap = v.Len(), v.Cap()
    +	case reflect.Map, reflect.String:
    +		valueLen = v.Len()
    +	}
    +	if valueLen != 0 || valueCap != 0 {
    +		d.w.Write(openParenBytes)
    +		if valueLen != 0 {
    +			d.w.Write(lenEqualsBytes)
    +			printInt(d.w, int64(valueLen), 10)
    +		}
    +		if valueCap != 0 {
    +			if valueLen != 0 {
    +				d.w.Write(spaceBytes)
    +			}
    +			d.w.Write(capEqualsBytes)
    +			printInt(d.w, int64(valueCap), 10)
    +		}
    +		d.w.Write(closeParenBytes)
    +		d.w.Write(spaceBytes)
    +	}
    +
    +	// Call Stringer/error interfaces if they exist and the handle methods flag
    +	// is enabled
    +	if !d.cs.DisableMethods {
    +		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
    +			if handled := handleMethods(d.cs, d.w, v); handled {
    +				return
    +			}
    +		}
    +	}
    +
    +	switch kind {
    +	case reflect.Invalid:
    +		// Do nothing.  We should never get here since invalid has already
    +		// been handled above.
    +
    +	case reflect.Bool:
    +		printBool(d.w, v.Bool())
    +
    +	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
    +		printInt(d.w, v.Int(), 10)
    +
    +	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
    +		printUint(d.w, v.Uint(), 10)
    +
    +	case reflect.Float32:
    +		printFloat(d.w, v.Float(), 32)
    +
    +	case reflect.Float64:
    +		printFloat(d.w, v.Float(), 64)
    +
    +	case reflect.Complex64:
    +		printComplex(d.w, v.Complex(), 32)
    +
    +	case reflect.Complex128:
    +		printComplex(d.w, v.Complex(), 64)
    +
    +	case reflect.Slice:
    +		if v.IsNil() {
    +			d.w.Write(nilAngleBytes)
    +			break
    +		}
    +		fallthrough
    +
    +	case reflect.Array:
    +		d.w.Write(openBraceNewlineBytes)
    +		d.depth++
    +		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
    +			d.indent()
    +			d.w.Write(maxNewlineBytes)
    +		} else {
    +			d.dumpSlice(v)
    +		}
    +		d.depth--
    +		d.indent()
    +		d.w.Write(closeBraceBytes)
    +
    +	case reflect.String:
    +		d.w.Write([]byte(strconv.Quote(v.String())))
    +
    +	case reflect.Interface:
    +		// The only time we should get here is for nil interfaces due to
    +		// unpackValue calls.
    +		if v.IsNil() {
    +			d.w.Write(nilAngleBytes)
    +		}
    +
    +	case reflect.Ptr:
    +		// Do nothing.  We should never get here since pointers have already
    +		// been handled above.
    +
    +	case reflect.Map:
    +		// nil maps should be indicated as different than empty maps
    +		if v.IsNil() {
    +			d.w.Write(nilAngleBytes)
    +			break
    +		}
    +
    +		d.w.Write(openBraceNewlineBytes)
    +		d.depth++
    +		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
    +			d.indent()
    +			d.w.Write(maxNewlineBytes)
    +		} else {
    +			numEntries := v.Len()
    +			keys := v.MapKeys()
    +			if d.cs.SortKeys {
    +				sortValues(keys, d.cs)
    +			}
    +			for i, key := range keys {
    +				d.dump(d.unpackValue(key))
    +				d.w.Write(colonSpaceBytes)
    +				d.ignoreNextIndent = true
    +				d.dump(d.unpackValue(v.MapIndex(key)))
    +				if i < (numEntries - 1) {
    +					d.w.Write(commaNewlineBytes)
    +				} else {
    +					d.w.Write(newlineBytes)
    +				}
    +			}
    +		}
    +		d.depth--
    +		d.indent()
    +		d.w.Write(closeBraceBytes)
    +
    +	case reflect.Struct:
    +		d.w.Write(openBraceNewlineBytes)
    +		d.depth++
    +		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
    +			d.indent()
    +			d.w.Write(maxNewlineBytes)
    +		} else {
    +			vt := v.Type()
    +			numFields := v.NumField()
    +			for i := 0; i < numFields; i++ {
    +				d.indent()
    +				vtf := vt.Field(i)
    +				d.w.Write([]byte(vtf.Name))
    +				d.w.Write(colonSpaceBytes)
    +				d.ignoreNextIndent = true
    +				d.dump(d.unpackValue(v.Field(i)))
    +				if i < (numFields - 1) {
    +					d.w.Write(commaNewlineBytes)
    +				} else {
    +					d.w.Write(newlineBytes)
    +				}
    +			}
    +		}
    +		d.depth--
    +		d.indent()
    +		d.w.Write(closeBraceBytes)
    +
    +	case reflect.Uintptr:
    +		printHexPtr(d.w, uintptr(v.Uint()))
    +
    +	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
    +		printHexPtr(d.w, v.Pointer())
    +
    +	// There were not any other types at the time this code was written, but
    +	// fall back to letting the default fmt package handle it in case any new
    +	// types are added.
    +	default:
    +		if v.CanInterface() {
    +			fmt.Fprintf(d.w, "%v", v.Interface())
    +		} else {
    +			fmt.Fprintf(d.w, "%v", v.String())
    +		}
    +	}
    +}
    +
    +// fdump is a helper function to consolidate the logic from the various public
    +// methods which take varying writers and config states.
    +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
    +	for _, arg := range a {
    +		if arg == nil {
    +			w.Write(interfaceBytes)
    +			w.Write(spaceBytes)
    +			w.Write(nilAngleBytes)
    +			w.Write(newlineBytes)
    +			continue
    +		}
    +
    +		d := dumpState{w: w, cs: cs}
    +		d.pointers = make(map[uintptr]int)
    +		d.dump(reflect.ValueOf(arg))
    +		d.w.Write(newlineBytes)
    +	}
    +}
    +
    +// Fdump formats and displays the passed arguments to io.Writer w.  It formats
    +// exactly the same as Dump.
    +func Fdump(w io.Writer, a ...interface{}) {
    +	fdump(&Config, w, a...)
    +}
    +
    +// Sdump returns a string with the passed arguments formatted exactly the same
    +// as Dump.
    +func Sdump(a ...interface{}) string {
    +	var buf bytes.Buffer
    +	fdump(&Config, &buf, a...)
    +	return buf.String()
    +}
    +
    +/*
    +Dump displays the passed parameters to standard out with newlines, customizable
    +indentation, and additional debug information such as complete types and all
    +pointer addresses used to indirect to the final value.  It provides the
    +following features over the built-in printing facilities provided by the fmt
    +package:
    +
    +	* Pointers are dereferenced and followed
    +	* Circular data structures are detected and handled properly
    +	* Custom Stringer/error interfaces are optionally invoked, including
    +	  on unexported types
    +	* Custom types which only implement the Stringer/error interfaces via
    +	  a pointer receiver are optionally invoked when passing non-pointer
    +	  variables
    +	* Byte arrays and slices are dumped like the hexdump -C command which
    +	  includes offsets, byte values in hex, and ASCII output
    +
    +The configuration options are controlled by an exported package global,
    +spew.Config.  See ConfigState for options documentation.
    +
    +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
    +get the formatted result as a string.
    +*/
    +func Dump(a ...interface{}) {
    +	fdump(&Config, os.Stdout, a...)
    +}
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/spew/format.go b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/format.go
    new file mode 100644
    index 0000000..ecf3b80
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/format.go
    @@ -0,0 +1,419 @@
    +/*
    + * Copyright (c) 2013 Dave Collins 
    + *
    + * Permission to use, copy, modify, and distribute this software for any
    + * purpose with or without fee is hereby granted, provided that the above
    + * copyright notice and this permission notice appear in all copies.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    + */
    +
    +package spew
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"reflect"
    +	"strconv"
    +	"strings"
    +)
    +
    +// supportedFlags is a list of all the character flags supported by fmt package.
    +const supportedFlags = "0-+# "
    +
    +// formatState implements the fmt.Formatter interface and contains information
    +// about the state of a formatting operation.  The NewFormatter function can
    +// be used to get a new Formatter which can be used directly as arguments
    +// in standard fmt package printing calls.
    +type formatState struct {
    +	value          interface{}
    +	fs             fmt.State
    +	depth          int
    +	pointers       map[uintptr]int
    +	ignoreNextType bool
    +	cs             *ConfigState
    +}
    +
    +// buildDefaultFormat recreates the original format string without precision
    +// and width information to pass in to fmt.Sprintf in the case of an
    +// unrecognized type.  Unless new types are added to the language, this
    +// function won't ever be called.
    +func (f *formatState) buildDefaultFormat() (format string) {
    +	buf := bytes.NewBuffer(percentBytes)
    +
    +	for _, flag := range supportedFlags {
    +		if f.fs.Flag(int(flag)) {
    +			buf.WriteRune(flag)
    +		}
    +	}
    +
    +	buf.WriteRune('v')
    +
    +	format = buf.String()
    +	return format
    +}
    +
    +// constructOrigFormat recreates the original format string including precision
    +// and width information to pass along to the standard fmt package.  This allows
    +// automatic deferral of all format strings this package doesn't support.
    +func (f *formatState) constructOrigFormat(verb rune) (format string) {
    +	buf := bytes.NewBuffer(percentBytes)
    +
    +	for _, flag := range supportedFlags {
    +		if f.fs.Flag(int(flag)) {
    +			buf.WriteRune(flag)
    +		}
    +	}
    +
    +	if width, ok := f.fs.Width(); ok {
    +		buf.WriteString(strconv.Itoa(width))
    +	}
    +
    +	if precision, ok := f.fs.Precision(); ok {
    +		buf.Write(precisionBytes)
    +		buf.WriteString(strconv.Itoa(precision))
    +	}
    +
    +	buf.WriteRune(verb)
    +
    +	format = buf.String()
    +	return format
    +}
    +
    +// unpackValue returns values inside of non-nil interfaces when possible and
    +// ensures that types for values which have been unpacked from an interface
    +// are displayed when the show types flag is also set.
    +// This is useful for data types like structs, arrays, slices, and maps which
    +// can contain varying types packed inside an interface.
    +func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
    +	if v.Kind() == reflect.Interface {
    +		f.ignoreNextType = false
    +		if !v.IsNil() {
    +			v = v.Elem()
    +		}
    +	}
    +	return v
    +}
    +
    +// formatPtr handles formatting of pointers by indirecting them as necessary.
    +func (f *formatState) formatPtr(v reflect.Value) {
    +	// Display nil if top level pointer is nil.
    +	showTypes := f.fs.Flag('#')
    +	if v.IsNil() && (!showTypes || f.ignoreNextType) {
    +		f.fs.Write(nilAngleBytes)
    +		return
    +	}
    +
    +	// Remove pointers at or below the current depth from map used to detect
    +	// circular refs.
    +	for k, depth := range f.pointers {
    +		if depth >= f.depth {
    +			delete(f.pointers, k)
    +		}
    +	}
    +
    +	// Keep list of all dereferenced pointers to possibly show later.
    +	pointerChain := make([]uintptr, 0)
    +
    +	// Figure out how many levels of indirection there are by derferencing
    +	// pointers and unpacking interfaces down the chain while detecting circular
    +	// references.
    +	nilFound := false
    +	cycleFound := false
    +	indirects := 0
    +	ve := v
    +	for ve.Kind() == reflect.Ptr {
    +		if ve.IsNil() {
    +			nilFound = true
    +			break
    +		}
    +		indirects++
    +		addr := ve.Pointer()
    +		pointerChain = append(pointerChain, addr)
    +		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
    +			cycleFound = true
    +			indirects--
    +			break
    +		}
    +		f.pointers[addr] = f.depth
    +
    +		ve = ve.Elem()
    +		if ve.Kind() == reflect.Interface {
    +			if ve.IsNil() {
    +				nilFound = true
    +				break
    +			}
    +			ve = ve.Elem()
    +		}
    +	}
    +
    +	// Display type or indirection level depending on flags.
    +	if showTypes && !f.ignoreNextType {
    +		f.fs.Write(openParenBytes)
    +		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
    +		f.fs.Write([]byte(ve.Type().String()))
    +		f.fs.Write(closeParenBytes)
    +	} else {
    +		if nilFound || cycleFound {
    +			indirects += strings.Count(ve.Type().String(), "*")
    +		}
    +		f.fs.Write(openAngleBytes)
    +		f.fs.Write([]byte(strings.Repeat("*", indirects)))
    +		f.fs.Write(closeAngleBytes)
    +	}
    +
    +	// Display pointer information depending on flags.
    +	if f.fs.Flag('+') && (len(pointerChain) > 0) {
    +		f.fs.Write(openParenBytes)
    +		for i, addr := range pointerChain {
    +			if i > 0 {
    +				f.fs.Write(pointerChainBytes)
    +			}
    +			printHexPtr(f.fs, addr)
    +		}
    +		f.fs.Write(closeParenBytes)
    +	}
    +
    +	// Display dereferenced value.
    +	switch {
    +	case nilFound == true:
    +		f.fs.Write(nilAngleBytes)
    +
    +	case cycleFound == true:
    +		f.fs.Write(circularShortBytes)
    +
    +	default:
    +		f.ignoreNextType = true
    +		f.format(ve)
    +	}
    +}
    +
    +// format is the main workhorse for providing the Formatter interface.  It
    +// uses the passed reflect value to figure out what kind of object we are
    +// dealing with and formats it appropriately.  It is a recursive function,
    +// however circular data structures are detected and handled properly.
    +func (f *formatState) format(v reflect.Value) {
    +	// Handle invalid reflect values immediately.
    +	kind := v.Kind()
    +	if kind == reflect.Invalid {
    +		f.fs.Write(invalidAngleBytes)
    +		return
    +	}
    +
    +	// Handle pointers specially.
    +	if kind == reflect.Ptr {
    +		f.formatPtr(v)
    +		return
    +	}
    +
    +	// Print type information unless already handled elsewhere.
    +	if !f.ignoreNextType && f.fs.Flag('#') {
    +		f.fs.Write(openParenBytes)
    +		f.fs.Write([]byte(v.Type().String()))
    +		f.fs.Write(closeParenBytes)
    +	}
    +	f.ignoreNextType = false
    +
    +	// Call Stringer/error interfaces if they exist and the handle methods
    +	// flag is enabled.
    +	if !f.cs.DisableMethods {
    +		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
    +			if handled := handleMethods(f.cs, f.fs, v); handled {
    +				return
    +			}
    +		}
    +	}
    +
    +	switch kind {
    +	case reflect.Invalid:
    +		// Do nothing.  We should never get here since invalid has already
    +		// been handled above.
    +
    +	case reflect.Bool:
    +		printBool(f.fs, v.Bool())
    +
    +	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
    +		printInt(f.fs, v.Int(), 10)
    +
    +	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
    +		printUint(f.fs, v.Uint(), 10)
    +
    +	case reflect.Float32:
    +		printFloat(f.fs, v.Float(), 32)
    +
    +	case reflect.Float64:
    +		printFloat(f.fs, v.Float(), 64)
    +
    +	case reflect.Complex64:
    +		printComplex(f.fs, v.Complex(), 32)
    +
    +	case reflect.Complex128:
    +		printComplex(f.fs, v.Complex(), 64)
    +
    +	case reflect.Slice:
    +		if v.IsNil() {
    +			f.fs.Write(nilAngleBytes)
    +			break
    +		}
    +		fallthrough
    +
    +	case reflect.Array:
    +		f.fs.Write(openBracketBytes)
    +		f.depth++
    +		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
    +			f.fs.Write(maxShortBytes)
    +		} else {
    +			numEntries := v.Len()
    +			for i := 0; i < numEntries; i++ {
    +				if i > 0 {
    +					f.fs.Write(spaceBytes)
    +				}
    +				f.ignoreNextType = true
    +				f.format(f.unpackValue(v.Index(i)))
    +			}
    +		}
    +		f.depth--
    +		f.fs.Write(closeBracketBytes)
    +
    +	case reflect.String:
    +		f.fs.Write([]byte(v.String()))
    +
    +	case reflect.Interface:
    +		// The only time we should get here is for nil interfaces due to
    +		// unpackValue calls.
    +		if v.IsNil() {
    +			f.fs.Write(nilAngleBytes)
    +		}
    +
    +	case reflect.Ptr:
    +		// Do nothing.  We should never get here since pointers have already
    +		// been handled above.
    +
    +	case reflect.Map:
    +		// nil maps should be indicated as different than empty maps
    +		if v.IsNil() {
    +			f.fs.Write(nilAngleBytes)
    +			break
    +		}
    +
    +		f.fs.Write(openMapBytes)
    +		f.depth++
    +		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
    +			f.fs.Write(maxShortBytes)
    +		} else {
    +			keys := v.MapKeys()
    +			if f.cs.SortKeys {
    +				sortValues(keys, f.cs)
    +			}
    +			for i, key := range keys {
    +				if i > 0 {
    +					f.fs.Write(spaceBytes)
    +				}
    +				f.ignoreNextType = true
    +				f.format(f.unpackValue(key))
    +				f.fs.Write(colonBytes)
    +				f.ignoreNextType = true
    +				f.format(f.unpackValue(v.MapIndex(key)))
    +			}
    +		}
    +		f.depth--
    +		f.fs.Write(closeMapBytes)
    +
    +	case reflect.Struct:
    +		numFields := v.NumField()
    +		f.fs.Write(openBraceBytes)
    +		f.depth++
    +		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
    +			f.fs.Write(maxShortBytes)
    +		} else {
    +			vt := v.Type()
    +			for i := 0; i < numFields; i++ {
    +				if i > 0 {
    +					f.fs.Write(spaceBytes)
    +				}
    +				vtf := vt.Field(i)
    +				if f.fs.Flag('+') || f.fs.Flag('#') {
    +					f.fs.Write([]byte(vtf.Name))
    +					f.fs.Write(colonBytes)
    +				}
    +				f.format(f.unpackValue(v.Field(i)))
    +			}
    +		}
    +		f.depth--
    +		f.fs.Write(closeBraceBytes)
    +
    +	case reflect.Uintptr:
    +		printHexPtr(f.fs, uintptr(v.Uint()))
    +
    +	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
    +		printHexPtr(f.fs, v.Pointer())
    +
    +	// There were not any other types at the time this code was written, but
    +	// fall back to letting the default fmt package handle it if any get added.
    +	default:
    +		format := f.buildDefaultFormat()
    +		if v.CanInterface() {
    +			fmt.Fprintf(f.fs, format, v.Interface())
    +		} else {
    +			fmt.Fprintf(f.fs, format, v.String())
    +		}
    +	}
    +}
    +
    +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
    +// details.
    +func (f *formatState) Format(fs fmt.State, verb rune) {
    +	f.fs = fs
    +
    +	// Use standard formatting for verbs that are not v.
    +	if verb != 'v' {
    +		format := f.constructOrigFormat(verb)
    +		fmt.Fprintf(fs, format, f.value)
    +		return
    +	}
    +
    +	if f.value == nil {
    +		if fs.Flag('#') {
    +			fs.Write(interfaceBytes)
    +		}
    +		fs.Write(nilAngleBytes)
    +		return
    +	}
    +
    +	f.format(reflect.ValueOf(f.value))
    +}
    +
    +// newFormatter is a helper function to consolidate the logic from the various
    +// public methods which take varying config states.
    +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
    +	fs := &formatState{value: v, cs: cs}
    +	fs.pointers = make(map[uintptr]int)
    +	return fs
    +}
    +
    +/*
    +NewFormatter returns a custom formatter that satisfies the fmt.Formatter
    +interface.  As a result, it integrates cleanly with standard fmt package
    +printing functions.  The formatter is useful for inline printing of smaller data
    +types similar to the standard %v format specifier.
    +
    +The custom formatter only responds to the %v (most compact), %+v (adds pointer
    +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
    +combinations.  Any other verbs such as %x and %q will be sent to the
    +standard fmt package for formatting.  In addition, the custom formatter ignores
    +the width and precision arguments (however they will still work on the format
    +specifiers not handled by the custom formatter).
    +
    +Typically this function shouldn't be called directly.  It is much easier to make
    +use of the custom formatter by calling one of the convenience functions such as
    +Printf, Println, or Fprintf.
    +*/
    +func NewFormatter(v interface{}) fmt.Formatter {
    +	return newFormatter(&Config, v)
    +}
    diff --git a/src/prometheus/vendor/github.com/davecgh/go-spew/spew/spew.go b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/spew.go
    new file mode 100644
    index 0000000..d8233f5
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/davecgh/go-spew/spew/spew.go
    @@ -0,0 +1,148 @@
    +/*
    + * Copyright (c) 2013 Dave Collins 
    + *
    + * Permission to use, copy, modify, and distribute this software for any
    + * purpose with or without fee is hereby granted, provided that the above
    + * copyright notice and this permission notice appear in all copies.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    + */
    +
    +package spew
    +
    +import (
    +	"fmt"
    +	"io"
    +)
    +
    +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the formatted string as a value that satisfies error.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
    +func Errorf(format string, a ...interface{}) (err error) {
    +	return fmt.Errorf(format, convertArgs(a)...)
    +}
    +
    +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
    +func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
    +	return fmt.Fprint(w, convertArgs(a)...)
    +}
    +
    +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
    +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
    +	return fmt.Fprintf(w, format, convertArgs(a)...)
    +}
    +
    +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
    +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
    +	return fmt.Fprintln(w, convertArgs(a)...)
    +}
    +
    +// Print is a wrapper for fmt.Print that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
    +func Print(a ...interface{}) (n int, err error) {
    +	return fmt.Print(convertArgs(a)...)
    +}
    +
    +// Printf is a wrapper for fmt.Printf that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
    +func Printf(format string, a ...interface{}) (n int, err error) {
    +	return fmt.Printf(format, convertArgs(a)...)
    +}
    +
    +// Println is a wrapper for fmt.Println that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the number of bytes written and any write error encountered.  See
    +// NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
    +func Println(a ...interface{}) (n int, err error) {
    +	return fmt.Println(convertArgs(a)...)
    +}
    +
    +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the resulting string.  See NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
    +func Sprint(a ...interface{}) string {
    +	return fmt.Sprint(convertArgs(a)...)
    +}
    +
    +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
    +// passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the resulting string.  See NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
    +func Sprintf(format string, a ...interface{}) string {
    +	return fmt.Sprintf(format, convertArgs(a)...)
    +}
    +
    +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
    +// were passed with a default Formatter interface returned by NewFormatter.  It
    +// returns the resulting string.  See NewFormatter for formatting details.
    +//
    +// This function is shorthand for the following syntax:
    +//
    +//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
    +func Sprintln(a ...interface{}) string {
    +	return fmt.Sprintln(convertArgs(a)...)
    +}
    +
    +// convertArgs accepts a slice of arguments and returns a slice of the same
    +// length with each argument converted to a default spew Formatter interface.
    +func convertArgs(args []interface{}) (formatters []interface{}) {
    +	formatters = make([]interface{}, len(args))
    +	for index, arg := range args {
    +		formatters[index] = NewFormatter(arg)
    +	}
    +	return formatters
    +}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/LICENSE b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/LICENSE
    new file mode 100644
    index 0000000..df83a9c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/LICENSE
    @@ -0,0 +1,8 @@
    +Copyright (c) 2012 Dave Grijalva
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
    new file mode 100644
    index 0000000..7fc1f79
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
    @@ -0,0 +1,97 @@
    +## Migration Guide from v2 -> v3
    +
    +Version 3 adds several new, frequently requested features.  To do so, it introduces a few breaking changes.  We've worked to keep these as minimal as possible.  This guide explains the breaking changes and how you can quickly update your code.
    +
    +### `Token.Claims` is now an interface type
    +
    +The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`.  We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
    +
    +`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior.  It is the default claims type when using `Parse`.  The usage is unchanged except you must type cast the claims property.
    +
    +The old example for parsing a token looked like this..
    +
    +```go
    +	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
    +		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
    +	}
    +```
    +
    +is now directly mapped to...
    +
    +```go
    +	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
    +		claims := token.Claims.(jwt.MapClaims)
    +		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
    +	}
    +```
    +
    +`StandardClaims` is designed to be embedded in your custom type.  You can supply a custom claims type with the new `ParseWithClaims` function.  Here's an example of using a custom claims type.
    +
    +```go
    +	type MyCustomClaims struct {
    +		User string
    +		*StandardClaims
    +	}
    +	
    +	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
    +		claims := token.Claims.(*MyCustomClaims)
    +		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
    +	}
    +```
    +
    +### `ParseFromRequest` has been moved
    +
    +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`.  The method signatures have also been augmented to receive a new argument: `Extractor`.
    +
    +`Extractors` do the work of picking the token string out of a request.  The interface is simple and composable.
    +
    +This simple parsing example:
    +
    +```go
    +	if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
    +		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
    +	}
    +```
    +
    +is directly mapped to:
    +
    +```go
    +	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
    +		claims := token.Claims.(jwt.MapClaims)
    +		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
    +	}
    +```
    +
    +There are several concrete `Extractor` types provided for your convenience:
    +
    +* `HeaderExtractor` will search a list of headers until one contains content.
    +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
    +* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
    +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
    +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument
    +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed.  A simple example is stripping the `Bearer ` text from a header
    +
    +
    +### RSA signing methods no longer accept `[]byte` keys
    +
    +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
    +
    +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`.  These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
    +
    +```go 
    +	func keyLookupFunc(*Token) (interface{}, error) {
    +		// Don't forget to validate the alg is what you expect:
    +		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
    +			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
    +		}
    +		
    +		// Look up key 
    +		key, err := lookupPublicKey(token.Header["kid"])
    +		if err != nil {
    +			return nil, err
    +		}
    +		
    +		// Unpack key from PEM encoded PKCS8
    +		return jwt.ParseRSAPublicKeyFromPEM(key)
    +	}
    +```
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/README.md b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/README.md
    new file mode 100644
    index 0000000..f48365f
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/README.md
    @@ -0,0 +1,85 @@
    +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
    +
    +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
    +
    +**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API.  We've tried to break as few things as possible, so there should just be a few type signature changes.  A full list of breaking changes is available in `VERSION_HISTORY.md`.  See `MIGRATION_GUIDE.md` for more information on updating your code.
    +
    +**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/).  As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected.  There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API.  In the short term, please make sure your implementation verifies the `alg` is what you expect.
    +
    +
    +## What the heck is a JWT?
    +
    +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
    +
    +In short, it's a signed JSON object that does something useful (for example, authentication).  It's commonly used for `Bearer` tokens in OAuth 2.  A token is made of three parts, separated by `.`'s.  The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded.  The last part is the signature, encoded the same way.
    +
    +The first part is called the header.  It contains the necessary information for verifying the last part, the signature.  For example, which encryption method was used for signing and what key was used.
    +
    +The part in the middle is the interesting bit.  It's called the Claims and contains the actual stuff you care about.  Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
    +
    +## What's in the box?
    +
    +This library supports the parsing and verification as well as the generation and signing of JWTs.  Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
    +
    +## Examples
    +
    +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
    +
    +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
    +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
    +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
    +
    +## Extensions
    +
    +This library publishes all the necessary components for adding your own signing methods.  Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.  
    +
    +Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
    +
    +## Compliance
    +
    +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: 
    +
    +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
    +
    +## Project Status & Versioning
    +
    +This library is considered production ready.  Feedback and feature requests are appreciated.  The API should be considered stable.  There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
    +
    +This project uses [Semantic Versioning 2.0.0](http://semver.org).  Accepted pull requests will land on `master`.  Periodically, versions will be tagged from `master`.  You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
    +
    +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users.  You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`.  It will do the right thing WRT semantic versioning.
    +
    +## Usage Tips
    +
    +### Signing vs Encryption
    +
    +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
    +
    +* The author of the token was in the possession of the signing secret
    +* The data has not been modified since it was signed
    +
    +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
    +
    +### Choosing a Signing Method
    +
    +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one.  The principal design decision is most likely going to be symmetric vs asymmetric.
    +
    +Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
    +
    +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
    +
    +### JWT and OAuth
    +
    +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
    +
    +Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
    +
    +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to.  For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
    +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
    +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
    + 
    +## More
    +
    +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
    +
    +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration.  You'll also find several implementation examples in the documentation.
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
    new file mode 100644
    index 0000000..b605b45
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
    @@ -0,0 +1,105 @@
    +## `jwt-go` Version History
    +
    +#### 3.0.0
    +
    +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
    +	* Dropped support for `[]byte` keys when using RSA signing methods.  This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
    +	* `ParseFromRequest` has been moved to `request` subpackage and usage has changed
    +	* The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`.  The default value is type `MapClaims`, which is an alias to `map[string]interface{}`.  This makes it possible to use a custom type when decoding claims.
    +* Other Additions and Changes
    +	* Added `Claims` interface type to allow users to decode the claims into a custom type
    +	* Added `ParseWithClaims`, which takes a third argument of type `Claims`.  Use this function instead of `Parse` if you have a custom type you'd like to decode into.
    +	* Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
    +	* Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
    +	* Added new interface type `Extractor`, which is used for extracting JWT strings from http requests.  Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
    +	* Added several new, more specific, validation errors to error type bitmask
    +	* Moved examples from README to executable example files
    +	* Signing method registry is now thread safe
    +	* Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
    +
    +#### 2.7.0
    +
    +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
    +
    +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
    +* Error text for expired tokens includes how long it's been expired
    +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
    +* Documentation updates
    +
    +#### 2.6.0
    +
    +* Exposed inner error within ValidationError
    +* Fixed validation errors when using UseJSONNumber flag
    +* Added several unit tests
    +
    +#### 2.5.0
    +
    +* Added support for signing method none.  You shouldn't use this.  The API tries to make this clear.
    +* Updated/fixed some documentation
    +* Added more helpful error message when trying to parse tokens that begin with `BEARER `
    +
    +#### 2.4.0
    +
    +* Added new type, Parser, to allow for configuration of various parsing parameters
    +	* You can now specify a list of valid signing methods.  Anything outside this set will be rejected.
    +	* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
    +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
    +* Fixed some bugs with ECDSA parsing
    +
    +#### 2.3.0
    +
    +* Added support for ECDSA signing methods
    +* Added support for RSA PSS signing methods (requires go v1.4)
    +
    +#### 2.2.0
    +
    +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`.  Result will now be the parsed token and an error, instead of a panic.
    +
    +#### 2.1.0
    +
    +Backwards compatible API change that was missed in 2.0.0.
    +
    +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
    +
    +#### 2.0.0
    +
    +There were two major reasons for breaking backwards compatibility with this update.  The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations.  There will likely be no required code changes to support this change.
    +
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods.  Not all keys used for all signing methods have a single standard on-disk representation.  Requiring `[]byte` as the type for all keys proved too limiting.  Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys.  Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
    +
    +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
    +
    +* **Compatibility Breaking Changes**
    +	* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
    +	* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
    +	* `KeyFunc` now returns `interface{}` instead of `[]byte`
    +	* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
    +	* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
    +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`.  Specific sizes are now just instances of this type.
    +    * Added public package global `SigningMethodHS256`
    +    * Added public package global `SigningMethodHS384`
    +    * Added public package global `SigningMethodHS512`
    +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`.  Specific sizes are now just instances of this type.
    +    * Added public package global `SigningMethodRS256`
    +    * Added public package global `SigningMethodRS384`
    +    * Added public package global `SigningMethodRS512`
    +* Moved sample private key for HMAC tests from an inline value to a file on disk.  Value is unchanged.
    +* Refactored the RSA implementation to be easier to read
    +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
    +
    +#### 1.0.2
    +
    +* Fixed bug in parsing public keys from certificates
    +* Added more tests around the parsing of keys for RS256
    +* Code refactoring in RS256 implementation.  No functional changes
    +
    +#### 1.0.1
    +
    +* Fixed panic if RS256 signing method was passed an invalid key
    +
    +#### 1.0.0
    +
    +* First versioned release
    +* API stabilized
    +* Supports creating, signing, parsing, and validating JWT tokens
    +* Supports RS256 and HS256 signing methods
    \ No newline at end of file
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/claims.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/claims.go
    new file mode 100644
    index 0000000..f0228f0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/claims.go
    @@ -0,0 +1,134 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// Claims is any type with a Valid method that determines
+// if the token is invalid for any supported reason.
+type Claims interface {
+	Valid() error
+}
+
+// StandardClaims is a structured version of the Claims section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types.
+type StandardClaims struct {
+	Audience  string `json:"aud,omitempty"`
+	ExpiresAt int64  `json:"exp,omitempty"`
+	Id        string `json:"jti,omitempty"`
+	IssuedAt  int64  `json:"iat,omitempty"`
+	Issuer    string `json:"iss,omitempty"`
+	NotBefore int64  `json:"nbf,omitempty"`
+	Subject   string `json:"sub,omitempty"`
+}
+
+// Valid validates the time-based claims "exp", "iat" and "nbf".
+// There is no accounting for clock skew.
+// If any of the above claims are not present in the token, it will still
+// be considered a valid claim.
+func (c StandardClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	// The claims below are optional, by default, so if they are set to the
+	// default value in Go, let's not fail the verification for them.
+	if c.VerifyExpiresAt(now, false) == false {
+		delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+		vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if c.VerifyIssuedAt(now, false) == false {
+		vErr.Inner = fmt.Errorf("Token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if c.VerifyNotBefore(now, false) == false {
+		vErr.Inner = fmt.Errorf("token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+// If req is false, this method will return true if the value matches or is unset.
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+	return verifyAud(c.Audience, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (typically the current Unix time).
+// If req is false, this method will return true if the value matches or is unset.
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	return verifyExp(c.ExpiresAt, cmp, req)
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (typically the current Unix time).
+// If req is false, this method will return true if the value matches or is unset.
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	return verifyIat(c.IssuedAt, cmp, req)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If req is false, this method will return true if the value matches or is unset.
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+	return verifyIss(c.Issuer, cmp, req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (typically the current Unix time).
+// If req is false, this method will return true if the value matches or is unset.
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud string, cmp string, required bool) bool {
+	if aud == "" {
+		return !required
+	}
+	if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
+		return true
+	} else {
+		return false
+	}
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+	if exp == 0 {
+		return !required
+	}
+	return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+	if iat == 0 {
+		return !required
+	}
+	return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+	if iss == "" {
+		return !required
+	}
+	if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
+		return true
+	} else {
+		return false
+	}
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+	if nbf == 0 {
+		return !required
+	}
+	return now >= nbf
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/doc.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/doc.go
    new file mode 100644
    index 0000000..a86dc1a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/doc.go
    @@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more information.
+package jwt
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
    new file mode 100644
    index 0000000..2f59a22
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
    @@ -0,0 +1,147 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"errors"
+	"math/big"
+)
+
+var (
+	// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+	ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// Specific instances for ES256, ES384 and ES512
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements the Verify method from SigningMethod.
+// For this verify method, key must be an *ecdsa.PublicKey.
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	switch k := key.(type) {
+	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	default:
+		return ErrInvalidKeyType
+	}
+
+	if len(sig) != 2*m.KeySize {
+		return ErrECDSAVerification
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
+		return nil
+	} else {
+		return ErrECDSAVerification
+	}
+}
+
+// Sign implements the Sign method from SigningMethod.
+// For this signing method, key must be an *ecdsa.PrivateKey.
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	// Get the key
+	var ecdsaKey *ecdsa.PrivateKey
+	switch k := key.(type) {
+	case *ecdsa.PrivateKey:
+		ecdsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+		curveBits := ecdsaKey.Curve.Params().BitSize
+
+		if m.CurveBits != curveBits {
+			return "", ErrInvalidKey
+		}
+
+		keyBytes := curveBits / 8
+		if curveBits%8 > 0 {
+			keyBytes += 1
+		}
+
+		// We serialize the outputs (r and s) into big-endian byte arrays and pad
+		// them with zeros on the left to make sure the sizes work out. Both arrays
+		// must be keyBytes long, and the output must be 2*keyBytes long.
+		rBytes := r.Bytes()
+		rBytesPadded := make([]byte, keyBytes)
+		copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+		sBytes := s.Bytes()
+		sBytesPadded := make([]byte, keyBytes)
+		copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+		out := append(rBytesPadded, sBytesPadded...)
+
+		return EncodeSegment(out), nil
+	} else {
+		return "", err
+	}
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
    new file mode 100644
    index 0000000..d19624b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
    @@ -0,0 +1,67 @@
+package jwt
+
+import (
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrNotECPublicKey  = errors.New("Key is not a valid ECDSA public key")
+	ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve private key (SEC 1 structure).
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+		return nil, err
+	}
+
+	var pkey *ecdsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, ErrNotECPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded PKIX public key, or an X.509 certificate containing one.
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key, falling back to extracting it from a certificate
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, ErrNotECPublicKey
+	}
+
+	return pkey, nil
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/errors.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/errors.go
    new file mode 100644
    index 0000000..1c93024
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/errors.go
    @@ -0,0 +1,59 @@
+package jwt
+
+import (
+	"errors"
+)
+
+// Common error values for invalid keys and unavailable hash functions
+var (
+	ErrInvalidKey      = errors.New("key is invalid")
+	ErrInvalidKeyType  = errors.New("key is of invalid type")
+	ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// Bit flags describing the errors that might occur when parsing and validating a token
+const (
+	ValidationErrorMalformed        uint32 = 1 << iota // Token is malformed
+	ValidationErrorUnverifiable                        // Token could not be verified because of signing problems
+	ValidationErrorSignatureInvalid                    // Signature validation failed
+
+	// Standard Claim validation errors
+	ValidationErrorAudience      // AUD validation failed
+	ValidationErrorExpired       // EXP validation failed
+	ValidationErrorIssuedAt      // IAT validation failed
+	ValidationErrorIssuer        // ISS validation failed
+	ValidationErrorNotValidYet   // NBF validation failed
+	ValidationErrorId            // JTI validation failed
+	ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// NewValidationError is a helper for constructing a ValidationError with a string error message.
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+	return &ValidationError{
+		text:   errorText,
+		Errors: errorFlags,
+	}
+}
+
+// ValidationError is the error returned from Parse if a token is not valid.
+type ValidationError struct {
+	Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
+	Errors uint32 // bitfield.  see ValidationError... constants
+	text   string // errors that do not have a valid error just have text
+}
+
+// Error implements the error interface.
+func (e ValidationError) Error() string {
+	if e.Inner != nil {
+		return e.Inner.Error()
+	} else if e.text != "" {
+		return e.text
+	} else {
+		return "token is invalid"
+	}
+}
+
+// valid reports whether no error flags have been set.
+func (e *ValidationError) valid() bool {
+	return e.Errors == 0
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/hmac.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/hmac.go
    new file mode 100644
    index 0000000..c229919
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/hmac.go
    @@ -0,0 +1,94 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+type SigningMethodHMAC struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for HS256, HS384 and HS512
+var (
+	SigningMethodHS256  *SigningMethodHMAC
+	SigningMethodHS384  *SigningMethodHMAC
+	SigningMethodHS512  *SigningMethodHMAC
+	ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+	// HS256
+	SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+		return SigningMethodHS256
+	})
+
+	// HS384
+	SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+		return SigningMethodHS384
+	})
+
+	// HS512
+	SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+		return SigningMethodHS512
+	})
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+	return m.Name
+}
+
+// Verify checks the signature of HSXXX tokens.  Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+	// Verify the key is the right type
+	keyBytes, ok := key.([]byte)
+	if !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Decode signature, for comparison
+	sig, err := DecodeSegment(signature)
+	if err != nil {
+		return err
+	}
+
+	// Can we use the specified hashing method?
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+
+	// This signing method is symmetric, so we validate the signature
+	// by reproducing the signature from the signing string and key, then
+	// comparing that against the provided signature.
+	hasher := hmac.New(m.Hash.New, keyBytes)
+	hasher.Write([]byte(signingString))
+	if !hmac.Equal(sig, hasher.Sum(nil)) {
+		return ErrSignatureInvalid
+	}
+
+	// No validation errors.  Signature is good.
+	return nil
+}
+
+// Sign implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte.
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+	if keyBytes, ok := key.([]byte); ok {
+		if !m.Hash.Available() {
+			return "", ErrHashUnavailable
+		}
+
+		hasher := hmac.New(m.Hash.New, keyBytes)
+		hasher.Write([]byte(signingString))
+
+		return EncodeSegment(hasher.Sum(nil)), nil
+	}
+
+	return "", ErrInvalidKey
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/map_claims.go
    new file mode 100644
    index 0000000..291213c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/map_claims.go
    @@ -0,0 +1,94 @@
+package jwt
+
+import (
+	"encoding/json"
+	"errors"
+	// "fmt"
+)
+
+// MapClaims is a Claims type that uses map[string]interface{} for JSON decoding.
+// This is the default claims type if you don't supply one.
+type MapClaims map[string]interface{}
+
+// VerifyAudience compares the aud claim against cmp.
+// If req is false, this method will return true if the value matches or is unset.
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+	aud, _ := m["aud"].(string)
+	return verifyAud(aud, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (typically the current Unix time).
+// If req is false, this method will return true if the value matches or is unset.
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	switch exp := m["exp"].(type) {
+	case float64:
+		return verifyExp(int64(exp), cmp, req)
+	case json.Number:
+		v, _ := exp.Int64()
+		return verifyExp(v, cmp, req)
+	}
+	return req == false
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (typically the current Unix time).
+// If req is false, this method will return true if the value matches or is unset.
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	switch iat := m["iat"].(type) {
+	case float64:
+		return verifyIat(int64(iat), cmp, req)
+	case json.Number:
+		v, _ := iat.Int64()
+		return verifyIat(v, cmp, req)
+	}
+	return req == false
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If req is false, this method will return true if the value matches or is unset.
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+	iss, _ := m["iss"].(string)
+	return verifyIss(iss, cmp, req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (typically the current Unix time).
+// If req is false, this method will return true if the value matches or is unset.
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	switch nbf := m["nbf"].(type) {
+	case float64:
+		return verifyNbf(int64(nbf), cmp, req)
+	case json.Number:
+		v, _ := nbf.Int64()
+		return verifyNbf(v, cmp, req)
+	}
+	return req == false
+}
+
+// Valid validates the time-based claims "exp", "iat" and "nbf".
+// There is no accounting for clock skew.
+// If any of the above claims are not present in the token, it will still
+// be considered a valid claim.
+func (m MapClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	if m.VerifyExpiresAt(now, false) == false {
+		vErr.Inner = errors.New("Token is expired")
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if m.VerifyIssuedAt(now, false) == false {
+		vErr.Inner = errors.New("Token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if m.VerifyNotBefore(now, false) == false {
+		vErr.Inner = errors.New("Token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/none.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/none.go
    new file mode 100644
    index 0000000..f04d189
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/none.go
    @@ -0,0 +1,52 @@
+package jwt
+
+// SigningMethodNone implements the none signing method.  This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+	SigningMethodNone = &signingMethodNone{}
+	NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
+
+	RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+		return SigningMethodNone
+	})
+}
+
+func (m *signingMethodNone) Alg() string {
+	return "none"
+}
+
+// Verify only allows the 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key.
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+	// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+	// accepting 'none' signing method
+	if _, ok := key.(unsafeNoneMagicConstant); !ok {
+		return NoneSignatureTypeDisallowedError
+	}
+	// If signing method is none, signature must be an empty string
+	if signature != "" {
+		return NewValidationError(
+			"'none' signing method with non-empty signature",
+			ValidationErrorSignatureInvalid,
+		)
+	}
+
+	// Accept 'none' signing method.
+	return nil
+}
+
+// Sign only allows 'none' signing if UnsafeAllowNoneSignatureType is specified as the key.
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+	if _, ok := key.(unsafeNoneMagicConstant); ok {
+		return "", nil
+	}
+	return "", NoneSignatureTypeDisallowedError
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/parser.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/parser.go
    new file mode 100644
    index 0000000..7bf1c4e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/parser.go
    @@ -0,0 +1,131 @@
+package jwt
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+type Parser struct {
+	ValidMethods         []string // If populated, only these methods will be considered valid
+	UseJSONNumber        bool     // Use JSON Number format in JSON decoder
+	SkipClaimsValidation bool     // Skip claims validation during token parsing
+}
+
+// Parse parses, validates, and returns a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+	return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+	parts := strings.Split(tokenString, ".")
+	if len(parts) != 3 {
+		return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+	}
+
+	var err error
+	token := &Token{Raw: tokenString}
+
+	// Parse the header segment
+	var headerBytes []byte
+	if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+		if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+			return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+		}
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// Parse the claims segment
+	var claimBytes []byte
+	token.Claims = claims
+
+	if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+	if p.UseJSONNumber {
+		dec.UseNumber()
+	}
+	// JSON Decode.  Special case for map type to avoid weird pointer behavior
+	if c, ok := token.Claims.(MapClaims); ok {
+		err = dec.Decode(&c)
+	} else {
+		err = dec.Decode(&claims)
+	}
+	// Handle decode error
+	if err != nil {
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// Lookup signature method
+	if method, ok := token.Header["alg"].(string); ok {
+		if token.Method = GetSigningMethod(method); token.Method == nil {
+			return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+		}
+	} else {
+		return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+	}
+
+	// Verify signing method is in the required set
+	if p.ValidMethods != nil {
+		var signingMethodValid = false
+		var alg = token.Method.Alg()
+		for _, m := range p.ValidMethods {
+			if m == alg {
+				signingMethodValid = true
+				break
+			}
+		}
+		if !signingMethodValid {
+			// signing method is not in the listed set
+			return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+		}
+	}
+
+	// Lookup key
+	var key interface{}
+	if keyFunc == nil {
+		// keyFunc was not provided.  short circuiting validation
+		return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+	}
+	if key, err = keyFunc(token); err != nil {
+		// keyFunc returned an error
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+	}
+
+	vErr := &ValidationError{}
+
+	// Validate Claims
+	if !p.SkipClaimsValidation {
+		if err := token.Claims.Valid(); err != nil {
+
+			// If the Claims Valid returned an error, check if it is a validation error,
+			// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+			if e, ok := err.(*ValidationError); !ok {
+				vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+			} else {
+				vErr = e
+			}
+		}
+	}
+
+	// Perform signature validation
+	token.Signature = parts[2]
+	if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+		vErr.Inner = err
+		vErr.Errors |= ValidationErrorSignatureInvalid
+	}
+
+	if vErr.valid() {
+		token.Valid = true
+		return token, nil
+	}
+
+	return token, vErr
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa.go
    new file mode 100644
    index 0000000..0ae0b19
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa.go
    @@ -0,0 +1,100 @@
    +package jwt
    +
    +import (
    +	"crypto"
    +	"crypto/rand"
    +	"crypto/rsa"
    +)
    +
    +// Implements the RSA family of signing methods signing methods
    +type SigningMethodRSA struct {
    +	Name string
    +	Hash crypto.Hash
    +}
    +
    +// Specific instances for RS256 and company
    +var (
    +	SigningMethodRS256 *SigningMethodRSA
    +	SigningMethodRS384 *SigningMethodRSA
    +	SigningMethodRS512 *SigningMethodRSA
    +)
    +
    +func init() {
    +	// RS256
    +	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
    +	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
    +		return SigningMethodRS256
    +	})
    +
    +	// RS384
    +	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
    +	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
    +		return SigningMethodRS384
    +	})
    +
    +	// RS512
    +	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
    +	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
    +		return SigningMethodRS512
    +	})
    +}
    +
    +func (m *SigningMethodRSA) Alg() string {
    +	return m.Name
    +}
    +
    +// Implements the Verify method from SigningMethod
    +// For this signing method, must be an rsa.PublicKey structure.
    +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
    +	var err error
    +
    +	// Decode the signature
    +	var sig []byte
    +	if sig, err = DecodeSegment(signature); err != nil {
    +		return err
    +	}
    +
    +	var rsaKey *rsa.PublicKey
    +	var ok bool
    +
    +	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
    +		return ErrInvalidKeyType
    +	}
    +
    +	// Create hasher
    +	if !m.Hash.Available() {
    +		return ErrHashUnavailable
    +	}
    +	hasher := m.Hash.New()
    +	hasher.Write([]byte(signingString))
    +
    +	// Verify the signature
    +	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
    +}
    +
    +// Implements the Sign method from SigningMethod
    +// For this signing method, must be an rsa.PrivateKey structure.
    +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
    +	var rsaKey *rsa.PrivateKey
    +	var ok bool
    +
    +	// Validate type of key
    +	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
    +		return "", ErrInvalidKey
    +	}
    +
    +	// Create the hasher
    +	if !m.Hash.Available() {
    +		return "", ErrHashUnavailable
    +	}
    +
    +	hasher := m.Hash.New()
    +	hasher.Write([]byte(signingString))
    +
    +	// Sign the string and return the encoded bytes
    +	if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
    +		return EncodeSegment(sigBytes), nil
    +	} else {
    +		return "", err
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
    new file mode 100644
    index 0000000..10ee9db
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
    @@ -0,0 +1,126 @@
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSA-PSS family of signing methods.
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions // PSS salt length and hash options used for Sign/Verify
+}
+
+// Singleton instances for PS256, PS384 and PS512 (populated in init).
+var (
+	SigningMethodPS256 *SigningMethodRSAPSS
+	SigningMethodPS384 *SigningMethodRSAPSS
+	SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() { // registers the PS* methods in the global signing-method registry
+	// PS256
+	SigningMethodPS256 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS256",
+			Hash: crypto.SHA256,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA256,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+		return SigningMethodPS256
+	})
+
+	// PS384
+	SigningMethodPS384 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS384",
+			Hash: crypto.SHA384,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA384,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+		return SigningMethodPS384
+	})
+
+	// PS512
+	SigningMethodPS512 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS512",
+			Hash: crypto.SHA512,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA512,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+		return SigningMethodPS512
+	})
+}
+
+// Verify implements the Verify method from SigningMethod.
+// For this verify method, key must be an *rsa.PublicKey struct.
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the base64url-encoded signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	default:
+		return ErrInvalidKey // NOTE(review): rsa.go's Verify returns ErrInvalidKeyType for this case — upstream inconsistency, kept as-is (vendored code)
+	}
+
+	// Create hasher; Available is false when the hash is not linked into the binary
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString)) // hash.Hash.Write never returns an error
+
+	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
+}
+
+// Sign implements the Sign method from SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey struct.
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+
+	switch k := key.(type) {
+	case *rsa.PrivateKey:
+		rsaKey = k
+	default:
+		return "", ErrInvalidKeyType // NOTE(review): rsa.go's Sign returns ErrInvalidKey for this case — upstream inconsistency, kept as-is (vendored code)
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString)) // hash.Hash.Write never returns an error
+
+	// Sign the string and return the base64url-encoded bytes
+	if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
    new file mode 100644
    index 0000000..213a90d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
    @@ -0,0 +1,69 @@
+package jwt
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
+	ErrNotRSAPrivateKey    = errors.New("Key is not a valid RSA private key")
+	ErrNotRSAPublicKey     = errors.New("Key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS#1 or PKCS#8 RSA private key.
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { // PKCS#8 fallback; may yield a non-RSA key, filtered below
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a PEM encoded PKIX public key or X.509 certificate containing an RSA public key.
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil { // fallback: accept a certificate and use its embedded public key
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+		return nil, ErrNotRSAPublicKey
+	}
+
+	return pkey, nil
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/signing_method.go
    new file mode 100644
    index 0000000..ed1f212
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/signing_method.go
    @@ -0,0 +1,35 @@
+package jwt
+
+import (
+	"sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{} // registry: "alg" name -> factory
+var signingMethodLock = new(sync.RWMutex)              // guards signingMethods
+
+// SigningMethod is implemented to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+	Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+	Sign(signingString string, key interface{}) (string, error)    // Returns encoded signature or error
+	Alg() string                                                   // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for a signing method.
+// This is typically done during init() in the method's implementation.
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+	signingMethodLock.Lock()
+	defer signingMethodLock.Unlock()
+
+	signingMethods[alg] = f
+}
+
+// GetSigningMethod looks up a signing method by its "alg" string; returns nil if unregistered.
+func GetSigningMethod(alg string) (method SigningMethod) {
+	signingMethodLock.RLock()
+	defer signingMethodLock.RUnlock()
+
+	if methodF, ok := signingMethods[alg]; ok {
+		method = methodF()
+	}
+	return
+}
    diff --git a/src/prometheus/vendor/github.com/dgrijalva/jwt-go/token.go b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/token.go
    new file mode 100644
    index 0000000..d637e08
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/dgrijalva/jwt-go/token.go
    @@ -0,0 +1,108 @@
+package jwt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"strings"
+	"time"
+)
+
+// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+// You can override it to use another time value.  This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Keyfunc is the callback used by Parse methods to supply
+// the key for verification.  The function receives the parsed,
+// but unverified Token.  This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// Token represents a JWT.  Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+	Raw       string                 // The raw token.  Populated when you Parse a token
+	Method    SigningMethod          // The signing method used or to be used
+	Header    map[string]interface{} // The first segment of the token
+	Claims    Claims                 // The second segment of the token
+	Signature string                 // The third segment of the token.  Populated when you Parse a token
+	Valid     bool                   // Is the token valid?  Populated when you Parse/Verify a token
+}
+
+// New creates a new Token for the given signing method, with empty MapClaims.
+func New(method SigningMethod) *Token {
+	return NewWithClaims(method, MapClaims{})
+}
+
+func NewWithClaims(method SigningMethod, claims Claims) *Token { // NewWithClaims creates a Token with the given claims; "typ"/"alg" headers are set here
+	return &Token{
+		Header: map[string]interface{}{
+			"typ": "JWT",
+			"alg": method.Alg(),
+		},
+		Claims: claims,
+		Method: method,
+	}
+}
+
+// SignedString returns the complete signed token: "<header>.<claims>.<signature>".
+func (t *Token) SignedString(key interface{}) (string, error) {
+	var sig, sstr string
+	var err error
+	if sstr, err = t.SigningString(); err != nil {
+		return "", err
+	}
+	if sig, err = t.Method.Sign(sstr, key); err != nil {
+		return "", err
+	}
+	return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// SigningString generates the "<header>.<claims>" string that gets signed.  This is the
+// most expensive part of the whole deal (two JSON marshals plus base64url).  Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+	var err error
+	parts := make([]string, 2)
+	for i, _ := range parts { // NOTE(review): "for i := range" is the idiomatic form; kept as-is (vendored code)
+		var jsonValue []byte
+		if i == 0 {
+			if jsonValue, err = json.Marshal(t.Header); err != nil {
+				return "", err
+			}
+		} else {
+			if jsonValue, err = json.Marshal(t.Claims); err != nil {
+				return "", err
+			}
+		}
+
+		parts[i] = EncodeSegment(jsonValue)
+	}
+	return strings.Join(parts, "."), nil
+}
+
+// Parse parses, validates, and returns a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil.
+func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+	return new(Parser).Parse(tokenString, keyFunc)
+}
+
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { // like Parse, but decodes into the caller-supplied Claims value
+	return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// EncodeSegment encodes a JWT segment: base64url with the trailing '=' padding stripped.
+func EncodeSegment(seg []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
+}
+
+// DecodeSegment decodes a JWT segment, restoring the stripped base64url '=' padding first.
+func DecodeSegment(seg string) ([]byte, error) {
+	if l := len(seg) % 4; l > 0 {
+		seg += strings.Repeat("=", 4-l)
+	}
+
+	return base64.URLEncoding.DecodeString(seg)
+}
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/LICENSE b/src/prometheus/vendor/github.com/docker/distribution/LICENSE
    new file mode 100644
    index 0000000..e06d208
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/LICENSE
    @@ -0,0 +1,202 @@
    +Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "{}"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright {yyyy} {name of copyright owner}
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    +
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/digest/digest.go b/src/prometheus/vendor/github.com/docker/distribution/digest/digest.go
    new file mode 100644
    index 0000000..31d821b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/digest/digest.go
    @@ -0,0 +1,139 @@
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+const (
+	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
+	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows to abstract the digest behind this type and work only in those
+// terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+	return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types (unanchored; see DigestRegexpAnchored).
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat returned when digest format invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestInvalidLength returned when digest has invalid length.
+	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+	// ErrDigestUnsupported returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+	d := Digest(s)
+
+	return d, d.Validate()
+}
+
+// FromReader returns the most valid digest for the underlying content using
+// the canonical digest algorithm.
+func FromReader(rd io.Reader) (Digest, error) {
+	return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+	return Canonical.FromBytes(p)
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+	s := string(d)
+
+	if !DigestRegexpAnchored.MatchString(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	i := strings.Index(s, ":")
+	if i < 0 {
+		return ErrDigestInvalidFormat
+	}
+
+	// case: "sha256:" with no hex.
+	if i+1 == len(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	switch algorithm := Algorithm(s[:i]); algorithm {
+	case SHA256, SHA384, SHA512:
+		if algorithm.Size()*2 != len(s[i+1:]) { // hex encoding: two characters per digest byte
+			return ErrDigestInvalidLength
+		}
+		break // redundant; Go switch cases do not fall through — kept as-is (vendored code)
+	default:
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+	return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string {
+	return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string { // String implements fmt.Stringer
+	return string(d)
+}
+
+func (d Digest) sepIndex() int { // sepIndex returns the index of the ':' separator; panics on malformed digests
+	i := strings.Index(string(d), ":")
+
+	if i < 0 {
+		panic("could not find ':' in digest: " + d)
+	}
+
+	return i
+}
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/digest/digester.go b/src/prometheus/vendor/github.com/docker/distribution/digest/digester.go
    new file mode 100644
    index 0000000..f3105a4
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/digest/digester.go
    @@ -0,0 +1,155 @@
    +package digest
    +
    +import (
    +	"crypto"
    +	"fmt"
    +	"hash"
    +	"io"
    +)
    +
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
    +// encoding.
    +type Algorithm string
    +
    +// supported digest types
    +const (
    +	SHA256 Algorithm = "sha256" // sha256 with hex encoding
    +	SHA384 Algorithm = "sha384" // sha384 with hex encoding
    +	SHA512 Algorithm = "sha512" // sha512 with hex encoding
    +
    +	// Canonical is the primary digest algorithm used with the distribution
    +	// project. Other digests may be used but this one is the primary storage
    +	// digest.
    +	Canonical = SHA256
    +)
    +
    +var (
    +	// TODO(stevvooe): Follow the pattern of the standard crypto package for
    +	// registration of digests. Effectively, we are a registerable set and
    +	// common symbol access.
    +
    +	// algorithms maps values to hash.Hash implementations. Other algorithms
    +	// may be available but they cannot be calculated by the digest package.
    +	algorithms = map[Algorithm]crypto.Hash{
    +		SHA256: crypto.SHA256,
    +		SHA384: crypto.SHA384,
    +		SHA512: crypto.SHA512,
    +	}
    +)
    +
    +// Available returns true if the digest type is available for use. If this
    +// returns false, New and Hash will return nil.
    +func (a Algorithm) Available() bool {
    +	h, ok := algorithms[a]
    +	if !ok {
    +		return false
    +	}
    +
    +	// check availability of the hash, as well
    +	return h.Available()
    +}
    +
    +func (a Algorithm) String() string {
    +	return string(a)
    +}
    +
    +// Size returns number of bytes returned by the hash.
    +func (a Algorithm) Size() int {
    +	h, ok := algorithms[a]
    +	if !ok {
    +		return 0
    +	}
    +	return h.Size()
    +}
    +
    +// Set implemented to allow use of Algorithm as a command line flag.
    +func (a *Algorithm) Set(value string) error {
    +	if value == "" {
    +		*a = Canonical
    +	} else {
    +		// just do a type conversion, support is queried with Available.
    +		*a = Algorithm(value)
    +	}
    +
    +	return nil
    +}
    +
    +// New returns a new digester for the specified algorithm. If the algorithm
    +// does not have a digester implementation, nil will be returned. This can be
    +// checked by calling Available before calling New.
    +func (a Algorithm) New() Digester {
    +	return &digester{
    +		alg:  a,
    +		hash: a.Hash(),
    +	}
    +}
    +
    +// Hash returns a new hash as used by the algorithm. If not available, the
    +// method will panic. Check Algorithm.Available() before calling.
    +func (a Algorithm) Hash() hash.Hash {
    +	if !a.Available() {
    +		// NOTE(stevvooe): A missing hash is usually a programming error that
    +		// must be resolved at compile time. We don't import in the digest
    +		// package to allow users to choose their hash implementation (such as
    +		// when using stevvooe/resumable or a hardware accelerated package).
    +		//
    +		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
    +		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
    +	}
    +
    +	return algorithms[a].New()
    +}
    +
    +// FromReader returns the digest of the reader using the algorithm.
    +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
    +	digester := a.New()
    +
    +	if _, err := io.Copy(digester.Hash(), rd); err != nil {
    +		return "", err
    +	}
    +
    +	return digester.Digest(), nil
    +}
    +
    +// FromBytes digests the input and returns a Digest.
    +func (a Algorithm) FromBytes(p []byte) Digest {
    +	digester := a.New()
    +
    +	if _, err := digester.Hash().Write(p); err != nil {
    +		// Writes to a Hash should never fail. None of the existing
    +		// hash implementations in the stdlib or hashes vendored
    +		// here can return errors from Write. Having a panic in this
    +		// condition instead of having FromBytes return an error value
    +		// avoids unnecessary error handling paths in all callers.
    +		panic("write to hash function returned error: " + err.Error())
    +	}
    +
    +	return digester.Digest()
    +}
    +
    +// TODO(stevvooe): Allow resolution of verifiers using the digest type and
    +// this registration system.
    +
    +// Digester calculates the digest of written data. Writes should go directly
    +// to the return value of Hash, while calling Digest will return the current
    +// value of the digest.
    +type Digester interface {
    +	Hash() hash.Hash // provides direct access to underlying hash instance.
    +	Digest() Digest
    +}
    +
    +// digester provides a simple digester definition that embeds a hasher.
    +type digester struct {
    +	alg  Algorithm
    +	hash hash.Hash
    +}
    +
    +func (d *digester) Hash() hash.Hash {
    +	return d.hash
    +}
    +
    +func (d *digester) Digest() Digest {
    +	return NewDigest(d.alg, d.hash)
    +}
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/digest/doc.go b/src/prometheus/vendor/github.com/docker/distribution/digest/doc.go
    new file mode 100644
    index 0000000..f64b0db
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/digest/doc.go
    @@ -0,0 +1,42 @@
    +// Package digest provides a generalized type to opaquely represent message
    +// digests and their operations within the registry. The Digest type is
    +// designed to serve as a flexible identifier in a content-addressable system.
    +// More importantly, it provides tools and wrappers to work with
    +// hash.Hash-based digests with little effort.
    +//
    +// Basics
    +//
    +// The format of a digest is simply a string with two parts, dubbed the
    +// "algorithm" and the "digest", separated by a colon:
    +//
+// 	<algorithm>:<digest>
    +//
    +// An example of a sha256 digest representation follows:
    +//
    +// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
    +//
    +// In this case, the string "sha256" is the algorithm and the hex bytes are
    +// the "digest".
    +//
    +// Because the Digest type is simply a string, once a valid Digest is
    +// obtained, comparisons are cheap, quick and simple to express with the
    +// standard equality operator.
    +//
    +// Verification
    +//
    +// The main benefit of using the Digest type is simple verification against a
    +// given digest. The Verifier interface, modeled after the stdlib hash.Hash
    +// interface, provides a common write sink for digest verification. After
    +// writing is complete, calling the Verifier.Verified method will indicate
    +// whether or not the stream of bytes matches the target digest.
    +//
    +// Missing Features
    +//
    +// In addition to the above, we intend to add the following features to this
    +// package:
    +//
    +// 1. A Digester type that supports write sink digest calculation.
    +//
    +// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
    +//
    +package digest
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/digest/set.go b/src/prometheus/vendor/github.com/docker/distribution/digest/set.go
    new file mode 100644
    index 0000000..4b9313c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/digest/set.go
    @@ -0,0 +1,245 @@
    +package digest
    +
    +import (
    +	"errors"
    +	"sort"
    +	"strings"
    +	"sync"
    +)
    +
    +var (
    +	// ErrDigestNotFound is used when a matching digest
    +	// could not be found in a set.
    +	ErrDigestNotFound = errors.New("digest not found")
    +
    +	// ErrDigestAmbiguous is used when multiple digests
    +	// are found in a set. None of the matching digests
    +	// should be considered valid matches.
    +	ErrDigestAmbiguous = errors.New("ambiguous digest string")
    +)
    +
    +// Set is used to hold a unique set of digests which
+// may be easily referenced by a string
    +// representation of the digest as well as short representation.
    +// The uniqueness of the short representation is based on other
    +// digests in the set. If digests are omitted from this set,
    +// collisions in a larger set may not be detected, therefore it
    +// is important to always do short representation lookups on
    +// the complete set of digests. To mitigate collisions, an
    +// appropriately long short code should be used.
    +type Set struct {
    +	mutex   sync.RWMutex
    +	entries digestEntries
    +}
    +
    +// NewSet creates an empty set of digests
    +// which may have digests added.
    +func NewSet() *Set {
    +	return &Set{
    +		entries: digestEntries{},
    +	}
    +}
    +
    +// checkShortMatch checks whether two digests match as either whole
    +// values or short values. This function does not test equality,
    +// rather whether the second value could match against the first
    +// value.
    +func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
    +	if len(hex) == len(shortHex) {
    +		if hex != shortHex {
    +			return false
    +		}
    +		if len(shortAlg) > 0 && string(alg) != shortAlg {
    +			return false
    +		}
    +	} else if !strings.HasPrefix(hex, shortHex) {
    +		return false
    +	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
    +		return false
    +	}
    +	return true
    +}
    +
    +// Lookup looks for a digest matching the given string representation.
    +// If no digests could be found ErrDigestNotFound will be returned
    +// with an empty digest value. If multiple matches are found
    +// ErrDigestAmbiguous will be returned with an empty digest value.
    +func (dst *Set) Lookup(d string) (Digest, error) {
    +	dst.mutex.RLock()
    +	defer dst.mutex.RUnlock()
    +	if len(dst.entries) == 0 {
    +		return "", ErrDigestNotFound
    +	}
    +	var (
    +		searchFunc func(int) bool
    +		alg        Algorithm
    +		hex        string
    +	)
    +	dgst, err := ParseDigest(d)
    +	if err == ErrDigestInvalidFormat {
    +		hex = d
    +		searchFunc = func(i int) bool {
    +			return dst.entries[i].val >= d
    +		}
    +	} else {
    +		hex = dgst.Hex()
    +		alg = dgst.Algorithm()
    +		searchFunc = func(i int) bool {
    +			if dst.entries[i].val == hex {
    +				return dst.entries[i].alg >= alg
    +			}
    +			return dst.entries[i].val >= hex
    +		}
    +	}
    +	idx := sort.Search(len(dst.entries), searchFunc)
    +	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
    +		return "", ErrDigestNotFound
    +	}
    +	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
    +		return dst.entries[idx].digest, nil
    +	}
    +	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
    +		return "", ErrDigestAmbiguous
    +	}
    +
    +	return dst.entries[idx].digest, nil
    +}
    +
    +// Add adds the given digest to the set. An error will be returned
    +// if the given digest is invalid. If the digest already exists in the
    +// set, this operation will be a no-op.
    +func (dst *Set) Add(d Digest) error {
    +	if err := d.Validate(); err != nil {
    +		return err
    +	}
    +	dst.mutex.Lock()
    +	defer dst.mutex.Unlock()
    +	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
    +	searchFunc := func(i int) bool {
    +		if dst.entries[i].val == entry.val {
    +			return dst.entries[i].alg >= entry.alg
    +		}
    +		return dst.entries[i].val >= entry.val
    +	}
    +	idx := sort.Search(len(dst.entries), searchFunc)
    +	if idx == len(dst.entries) {
    +		dst.entries = append(dst.entries, entry)
    +		return nil
    +	} else if dst.entries[idx].digest == d {
    +		return nil
    +	}
    +
    +	entries := append(dst.entries, nil)
    +	copy(entries[idx+1:], entries[idx:len(entries)-1])
    +	entries[idx] = entry
    +	dst.entries = entries
    +	return nil
    +}
    +
+// Remove removes the given digest from the set. An error will be
    +// returned if the given digest is invalid. If the digest does
    +// not exist in the set, this operation will be a no-op.
    +func (dst *Set) Remove(d Digest) error {
    +	if err := d.Validate(); err != nil {
    +		return err
    +	}
    +	dst.mutex.Lock()
    +	defer dst.mutex.Unlock()
    +	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
    +	searchFunc := func(i int) bool {
    +		if dst.entries[i].val == entry.val {
    +			return dst.entries[i].alg >= entry.alg
    +		}
    +		return dst.entries[i].val >= entry.val
    +	}
    +	idx := sort.Search(len(dst.entries), searchFunc)
    +	// Not found if idx is after or value at idx is not digest
    +	if idx == len(dst.entries) || dst.entries[idx].digest != d {
    +		return nil
    +	}
    +
    +	entries := dst.entries
    +	copy(entries[idx:], entries[idx+1:])
    +	entries = entries[:len(entries)-1]
    +	dst.entries = entries
    +
    +	return nil
    +}
    +
    +// All returns all the digests in the set
    +func (dst *Set) All() []Digest {
    +	dst.mutex.RLock()
    +	defer dst.mutex.RUnlock()
    +	retValues := make([]Digest, len(dst.entries))
    +	for i := range dst.entries {
    +		retValues[i] = dst.entries[i].digest
    +	}
    +
    +	return retValues
    +}
    +
    +// ShortCodeTable returns a map of Digest to unique short codes. The
    +// length represents the minimum value, the maximum length may be the
    +// entire value of digest if uniqueness cannot be achieved without the
    +// full value. This function will attempt to make short codes as short
    +// as possible to be unique.
    +func ShortCodeTable(dst *Set, length int) map[Digest]string {
    +	dst.mutex.RLock()
    +	defer dst.mutex.RUnlock()
    +	m := make(map[Digest]string, len(dst.entries))
    +	l := length
    +	resetIdx := 0
    +	for i := 0; i < len(dst.entries); i++ {
    +		var short string
    +		extended := true
    +		for extended {
    +			extended = false
    +			if len(dst.entries[i].val) <= l {
    +				short = dst.entries[i].digest.String()
    +			} else {
    +				short = dst.entries[i].val[:l]
    +				for j := i + 1; j < len(dst.entries); j++ {
    +					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
    +						if j > resetIdx {
    +							resetIdx = j
    +						}
    +						extended = true
    +					} else {
    +						break
    +					}
    +				}
    +				if extended {
    +					l++
    +				}
    +			}
    +		}
    +		m[dst.entries[i].digest] = short
    +		if i >= resetIdx {
    +			l = length
    +		}
    +	}
    +	return m
    +}
    +
    +type digestEntry struct {
    +	alg    Algorithm
    +	val    string
    +	digest Digest
    +}
    +
    +type digestEntries []*digestEntry
    +
    +func (d digestEntries) Len() int {
    +	return len(d)
    +}
    +
    +func (d digestEntries) Less(i, j int) bool {
    +	if d[i].val != d[j].val {
    +		return d[i].val < d[j].val
    +	}
    +	return d[i].alg < d[j].alg
    +}
    +
    +func (d digestEntries) Swap(i, j int) {
    +	d[i], d[j] = d[j], d[i]
    +}
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/digest/verifiers.go b/src/prometheus/vendor/github.com/docker/distribution/digest/verifiers.go
    new file mode 100644
    index 0000000..9af3be1
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/digest/verifiers.go
    @@ -0,0 +1,44 @@
    +package digest
    +
    +import (
    +	"hash"
    +	"io"
    +)
    +
    +// Verifier presents a general verification interface to be used with message
    +// digests and other byte stream verifications. Users instantiate a Verifier
    +// from one of the various methods, write the data under test to it then check
    +// the result with the Verified method.
    +type Verifier interface {
    +	io.Writer
    +
    +	// Verified will return true if the content written to Verifier matches
    +	// the digest.
    +	Verified() bool
    +}
    +
    +// NewDigestVerifier returns a verifier that compares the written bytes
    +// against a passed in digest.
    +func NewDigestVerifier(d Digest) (Verifier, error) {
    +	if err := d.Validate(); err != nil {
    +		return nil, err
    +	}
    +
    +	return hashVerifier{
    +		hash:   d.Algorithm().Hash(),
    +		digest: d,
    +	}, nil
    +}
    +
    +type hashVerifier struct {
    +	digest Digest
    +	hash   hash.Hash
    +}
    +
    +func (hv hashVerifier) Write(p []byte) (n int, err error) {
    +	return hv.hash.Write(p)
    +}
    +
    +func (hv hashVerifier) Verified() bool {
    +	return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
    +}
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/reference/reference.go b/src/prometheus/vendor/github.com/docker/distribution/reference/reference.go
    new file mode 100644
    index 0000000..bb09fa2
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/reference/reference.go
    @@ -0,0 +1,334 @@
    +// Package reference provides a general type to represent any way of referencing images within the registry.
    +// Its main purpose is to abstract tags and digests (content-addressable hash).
    +//
    +// Grammar
    +//
    +// 	reference                       := name [ ":" tag ] [ "@" digest ]
    +//	name                            := [hostname '/'] component ['/' component]*
    +//	hostname                        := hostcomponent ['.' hostcomponent]* [':' port-number]
    +//	hostcomponent                   := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
    +//	port-number                     := /[0-9]+/
    +//	component                       := alpha-numeric [separator alpha-numeric]*
    +// 	alpha-numeric                   := /[a-z0-9]+/
    +//	separator                       := /[_.]|__|[-]*/
    +//
    +//	tag                             := /[\w][\w.-]{0,127}/
    +//
    +//	digest                          := digest-algorithm ":" digest-hex
    +//	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
    +//	digest-algorithm-separator      := /[+.-_]/
    +//	digest-algorithm-component      := /[A-Za-z][A-Za-z0-9]*/
    +//	digest-hex                      := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
    +package reference
    +
    +import (
    +	"errors"
    +	"fmt"
    +
    +	"github.com/docker/distribution/digest"
    +)
    +
    +const (
    +	// NameTotalLengthMax is the maximum total number of characters in a repository name.
    +	NameTotalLengthMax = 255
    +)
    +
    +var (
    +	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
    +	ErrReferenceInvalidFormat = errors.New("invalid reference format")
    +
    +	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
    +	ErrTagInvalidFormat = errors.New("invalid tag format")
    +
    +	// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
    +	ErrDigestInvalidFormat = errors.New("invalid digest format")
    +
    +	// ErrNameEmpty is returned for empty, invalid repository names.
    +	ErrNameEmpty = errors.New("repository name must have at least one component")
    +
    +	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
    +	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
    +)
    +
    +// Reference is an opaque object reference identifier that may include
    +// modifiers such as a hostname, name, tag, and digest.
    +type Reference interface {
    +	// String returns the full reference
    +	String() string
    +}
    +
    +// Field provides a wrapper type for resolving correct reference types when
    +// working with encoding.
    +type Field struct {
    +	reference Reference
    +}
    +
    +// AsField wraps a reference in a Field for encoding.
    +func AsField(reference Reference) Field {
    +	return Field{reference}
    +}
    +
    +// Reference unwraps the reference type from the field to
    +// return the Reference object. This object should be
    +// of the appropriate type to further check for different
    +// reference types.
    +func (f Field) Reference() Reference {
    +	return f.reference
    +}
    +
    +// MarshalText serializes the field to byte text which
    +// is the string of the reference.
    +func (f Field) MarshalText() (p []byte, err error) {
    +	return []byte(f.reference.String()), nil
    +}
    +
    +// UnmarshalText parses text bytes by invoking the
    +// reference parser to ensure the appropriately
    +// typed reference object is wrapped by field.
    +func (f *Field) UnmarshalText(p []byte) error {
    +	r, err := Parse(string(p))
    +	if err != nil {
    +		return err
    +	}
    +
    +	f.reference = r
    +	return nil
    +}
    +
    +// Named is an object with a full name
    +type Named interface {
    +	Reference
    +	Name() string
    +}
    +
    +// Tagged is an object which has a tag
    +type Tagged interface {
    +	Reference
    +	Tag() string
    +}
    +
    +// NamedTagged is an object including a name and tag.
    +type NamedTagged interface {
    +	Named
    +	Tag() string
    +}
    +
    +// Digested is an object which has a digest
    +// in which it can be referenced by
    +type Digested interface {
    +	Reference
    +	Digest() digest.Digest
    +}
    +
    +// Canonical reference is an object with a fully unique
    +// name including a name with hostname and digest
    +type Canonical interface {
    +	Named
    +	Digest() digest.Digest
    +}
    +
    +// SplitHostname splits a named reference into a
    +// hostname and name string. If no valid hostname is
    +// found, the hostname is empty and the full value
    +// is returned as name
    +func SplitHostname(named Named) (string, string) {
    +	name := named.Name()
    +	match := anchoredNameRegexp.FindStringSubmatch(name)
    +	if match == nil || len(match) != 3 {
    +		return "", name
    +	}
    +	return match[1], match[2]
    +}
    +
    +// Parse parses s and returns a syntactically valid Reference.
    +// If an error was encountered it is returned, along with a nil Reference.
    +// NOTE: Parse will not handle short digests.
    +func Parse(s string) (Reference, error) {
    +	matches := ReferenceRegexp.FindStringSubmatch(s)
    +	if matches == nil {
    +		if s == "" {
    +			return nil, ErrNameEmpty
    +		}
    +		// TODO(dmcgowan): Provide more specific and helpful error
    +		return nil, ErrReferenceInvalidFormat
    +	}
    +
    +	if len(matches[1]) > NameTotalLengthMax {
    +		return nil, ErrNameTooLong
    +	}
    +
    +	ref := reference{
    +		name: matches[1],
    +		tag:  matches[2],
    +	}
    +	if matches[3] != "" {
    +		var err error
    +		ref.digest, err = digest.ParseDigest(matches[3])
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +
    +	r := getBestReferenceType(ref)
    +	if r == nil {
    +		return nil, ErrNameEmpty
    +	}
    +
    +	return r, nil
    +}
    +
    +// ParseNamed parses s and returns a syntactically valid reference implementing
    +// the Named interface. The reference must have a name, otherwise an error is
    +// returned.
    +// If an error was encountered it is returned, along with a nil Reference.
    +// NOTE: ParseNamed will not handle short digests.
    +func ParseNamed(s string) (Named, error) {
    +	ref, err := Parse(s)
    +	if err != nil {
    +		return nil, err
    +	}
    +	named, isNamed := ref.(Named)
    +	if !isNamed {
    +		return nil, fmt.Errorf("reference %s has no name", ref.String())
    +	}
    +	return named, nil
    +}
    +
    +// WithName returns a named object representing the given string. If the input
    +// is invalid ErrReferenceInvalidFormat will be returned.
    +func WithName(name string) (Named, error) {
    +	if len(name) > NameTotalLengthMax {
    +		return nil, ErrNameTooLong
    +	}
    +	if !anchoredNameRegexp.MatchString(name) {
    +		return nil, ErrReferenceInvalidFormat
    +	}
    +	return repository(name), nil
    +}
    +
    +// WithTag combines the name from "name" and the tag from "tag" to form a
    +// reference incorporating both the name and the tag.
    +func WithTag(name Named, tag string) (NamedTagged, error) {
    +	if !anchoredTagRegexp.MatchString(tag) {
    +		return nil, ErrTagInvalidFormat
    +	}
    +	return taggedReference{
    +		name: name.Name(),
    +		tag:  tag,
    +	}, nil
    +}
    +
    +// WithDigest combines the name from "name" and the digest from "digest" to form
    +// a reference incorporating both the name and the digest.
    +func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
    +	if !anchoredDigestRegexp.MatchString(digest.String()) {
    +		return nil, ErrDigestInvalidFormat
    +	}
    +	return canonicalReference{
    +		name:   name.Name(),
    +		digest: digest,
    +	}, nil
    +}
    +
    +func getBestReferenceType(ref reference) Reference {
    +	if ref.name == "" {
    +		// Allow digest only references
    +		if ref.digest != "" {
    +			return digestReference(ref.digest)
    +		}
    +		return nil
    +	}
    +	if ref.tag == "" {
    +		if ref.digest != "" {
    +			return canonicalReference{
    +				name:   ref.name,
    +				digest: ref.digest,
    +			}
    +		}
    +		return repository(ref.name)
    +	}
    +	if ref.digest == "" {
    +		return taggedReference{
    +			name: ref.name,
    +			tag:  ref.tag,
    +		}
    +	}
    +
    +	return ref
    +}
    +
    +type reference struct {
    +	name   string
    +	tag    string
    +	digest digest.Digest
    +}
    +
    +func (r reference) String() string {
    +	return r.name + ":" + r.tag + "@" + r.digest.String()
    +}
    +
    +func (r reference) Name() string {
    +	return r.name
    +}
    +
    +func (r reference) Tag() string {
    +	return r.tag
    +}
    +
    +func (r reference) Digest() digest.Digest {
    +	return r.digest
    +}
    +
    +type repository string
    +
    +func (r repository) String() string {
    +	return string(r)
    +}
    +
    +func (r repository) Name() string {
    +	return string(r)
    +}
    +
    +type digestReference digest.Digest
    +
    +func (d digestReference) String() string {
    +	return d.String()
    +}
    +
    +func (d digestReference) Digest() digest.Digest {
    +	return digest.Digest(d)
    +}
    +
    +type taggedReference struct {
    +	name string
    +	tag  string
    +}
    +
    +func (t taggedReference) String() string {
    +	return t.name + ":" + t.tag
    +}
    +
    +func (t taggedReference) Name() string {
    +	return t.name
    +}
    +
    +func (t taggedReference) Tag() string {
    +	return t.tag
    +}
    +
    +type canonicalReference struct {
    +	name   string
    +	digest digest.Digest
    +}
    +
    +func (c canonicalReference) String() string {
    +	return c.name + "@" + c.digest.String()
    +}
    +
    +func (c canonicalReference) Name() string {
    +	return c.name
    +}
    +
    +func (c canonicalReference) Digest() digest.Digest {
    +	return c.digest
    +}
    diff --git a/src/prometheus/vendor/github.com/docker/distribution/reference/regexp.go b/src/prometheus/vendor/github.com/docker/distribution/reference/regexp.go
    new file mode 100644
    index 0000000..9a7d366
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/docker/distribution/reference/regexp.go
    @@ -0,0 +1,124 @@
    +package reference
    +
    +import "regexp"
    +
    +var (
    +	// alphaNumericRegexp defines the alpha numeric atom, typically a
    +	// component of names. This only allows lower case characters and digits.
    +	alphaNumericRegexp = match(`[a-z0-9]+`)
    +
    +	// separatorRegexp defines the separators allowed to be embedded in name
    +	// components. This allow one period, one or two underscore and multiple
    +	// dashes.
    +	separatorRegexp = match(`(?:[._]|__|[-]*)`)
    +
    +	// nameComponentRegexp restricts registry path component names to start
    +	// with at least one letter or number, with following parts able to be
    +	// separated by one period, one or two underscore and multiple dashes.
    +	nameComponentRegexp = expression(
    +		alphaNumericRegexp,
    +		optional(repeated(separatorRegexp, alphaNumericRegexp)))
    +
    +	// hostnameComponentRegexp restricts the registry hostname component of a
    +	// repository name to start with a component as defined by hostnameRegexp
    +	// and followed by an optional port.
    +	hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
    +
    +	// hostnameRegexp defines the structure of potential hostname components
    +	// that may be part of image names. This is purposely a subset of what is
    +	// allowed by DNS to ensure backwards compatibility with Docker image
    +	// names.
    +	hostnameRegexp = expression(
    +		hostnameComponentRegexp,
    +		optional(repeated(literal(`.`), hostnameComponentRegexp)),
    +		optional(literal(`:`), match(`[0-9]+`)))
    +
    +	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
    +	TagRegexp = match(`[\w][\w.-]{0,127}`)
    +
    +	// anchoredTagRegexp matches valid tag names, anchored at the start and
    +	// end of the matched string.
    +	anchoredTagRegexp = anchored(TagRegexp)
    +
    +	// DigestRegexp matches valid digests.
    +	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
    +
    +	// anchoredDigestRegexp matches valid digests, anchored at the start and
    +	// end of the matched string.
    +	anchoredDigestRegexp = anchored(DigestRegexp)
    +
    +	// NameRegexp is the format for the name component of references. The
    +	// regexp has capturing groups for the hostname and name part omitting
    +	// the separating forward slash from either.
    +	NameRegexp = expression(
    +		optional(hostnameRegexp, literal(`/`)),
    +		nameComponentRegexp,
    +		optional(repeated(literal(`/`), nameComponentRegexp)))
    +
    +	// anchoredNameRegexp is used to parse a name value, capturing the
    +	// hostname and trailing components.
    +	anchoredNameRegexp = anchored(
    +		optional(capture(hostnameRegexp), literal(`/`)),
    +		capture(nameComponentRegexp,
    +			optional(repeated(literal(`/`), nameComponentRegexp))))
    +
    +	// ReferenceRegexp is the full supported format of a reference. The regexp
    +	// is anchored and has capturing groups for name, tag, and digest
    +	// components.
    +	ReferenceRegexp = anchored(capture(NameRegexp),
    +		optional(literal(":"), capture(TagRegexp)),
    +		optional(literal("@"), capture(DigestRegexp)))
    +)
    +
    +// match compiles the string to a regular expression.
    +var match = regexp.MustCompile
    +
    +// literal compiles s into a literal regular expression, escaping any regexp
    +// reserved characters.
    +func literal(s string) *regexp.Regexp {
    +	re := match(regexp.QuoteMeta(s))
    +
    +	if _, complete := re.LiteralPrefix(); !complete {
    +		panic("must be a literal")
    +	}
    +
    +	return re
    +}
    +
    +// expression defines a full expression, where each regular expression must
    +// follow the previous.
    +func expression(res ...*regexp.Regexp) *regexp.Regexp {
    +	var s string
    +	for _, re := range res {
    +		s += re.String()
    +	}
    +
    +	return match(s)
    +}
    +
    +// optional wraps the expression in a non-capturing group and makes the
    +// production optional.
    +func optional(res ...*regexp.Regexp) *regexp.Regexp {
    +	return match(group(expression(res...)).String() + `?`)
    +}
    +
    +// repeated wraps the regexp in a non-capturing group to get one or more
    +// matches.
    +func repeated(res ...*regexp.Regexp) *regexp.Regexp {
    +	return match(group(expression(res...)).String() + `+`)
    +}
    +
    +// group wraps the regexp in a non-capturing group.
    +func group(res ...*regexp.Regexp) *regexp.Regexp {
    +	return match(`(?:` + expression(res...).String() + `)`)
    +}
    +
    +// capture wraps the expression in a capturing group.
    +func capture(res ...*regexp.Regexp) *regexp.Regexp {
    +	return match(`(` + expression(res...).String() + `)`)
    +}
    +
    +// anchored anchors the regular expression by adding start and end delimiters.
    +func anchored(res ...*regexp.Regexp) *regexp.Regexp {
    +	return match(`^` + expression(res...).String() + `$`)
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/CHANGES.md b/src/prometheus/vendor/github.com/emicklei/go-restful/CHANGES.md
    new file mode 100644
    index 0000000..070bca7
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/CHANGES.md
    @@ -0,0 +1,163 @@
    +Change history of go-restful
    +=
    +2016-02-14
+- take the quality factor of the Accept header mediatype into account when deciding the content type of the response
    +- add constructors for custom entity accessors for xml and json 
    +
    +2015-09-27
    +- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
    +
    +2015-09-25
    +- fixed problem with changing Header after WriteHeader (issue 235)
    +
    +2015-09-14
    +- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
    +- added support for custom EntityReaderWriters.
    +
    +2015-08-06
    +- add support for reading entities from compressed request content
    +- use sync.Pool for compressors of http response and request body
    +- add Description to Parameter for documentation in Swagger UI
    +
    +2015-03-20
    +- add configurable logging
    +
    +2015-03-18
    +- if not specified, the Operation is derived from the Route function
    +
    +2015-03-17
    +- expose Parameter creation functions
    +- make trace logger an interface
    +- fix OPTIONSFilter
    +- customize rendering of ServiceError
    +- JSR311 router now handles wildcards
    +- add Notes to Route
    +
    +2014-11-27
    +- (api add) PrettyPrint per response. (as proposed in #167)
    +
    +2014-11-12
    +- (api add) ApiVersion(.) for documentation in Swagger UI
    +
    +2014-11-10
    +- (api change) struct fields tagged with "description" show up in Swagger UI
    +
    +2014-10-31
    +- (api change) ReturnsError -> Returns
    +- (api add)    RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
    +- fix swagger nested structs
    +- sort Swagger response messages by code
    +
    +2014-10-23
    +- (api add) ReturnsError allows you to document Http codes in swagger
    +- fixed problem with greedy CurlyRouter
    +- (api add) Access-Control-Max-Age in CORS
    +- add tracing functionality (injectable) for debugging purposes
    +- support JSON parse 64bit int 
    +- fix empty parameters for swagger
    +- WebServicesUrl is now optional for swagger
    +- fixed duplicate AccessControlAllowOrigin in CORS
    +- (api change) expose ServeMux in container
    +- (api add) added AllowedDomains in CORS
    +- (api add) ParameterNamed for detailed documentation
    +
    +2014-04-16
    +- (api add) expose constructor of Request for testing.
    +
    +2014-06-27
    +- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
+- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
    +
    +2014-07-03
    +- (api add) CORS can be configured with a list of allowed domains
    +
    +2014-03-12
    +- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
    +
    +2014-02-26
    +- (api add) Request now provides information about the matched Route, see method SelectedRoutePath 
    +
    +2014-02-17
    +- (api change) renamed parameter constants (go-lint checks)
    +
    +2014-01-10
    + - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
    +
    +2014-01-07
    + - (api change) Write* methods in Response now return the error or nil.
    + - added example of serving HTML from a Go template.
    + - fixed comparing Allowed headers in CORS (is now case-insensitive)
    +
    +2013-11-13
    + - (api add) Response knows how many bytes are written to the response body.
    +
    +2013-10-29
    + - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
    +
    +2013-10-04
    + - (api add) Response knows what HTTP status has been written
    + - (api add) Request can have attributes (map of string->interface, also called request-scoped variables
    +
    +2013-09-12
    + - (api change) Router interface simplified
    + - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
    +
    +2013-08-05
    + - add OPTIONS support
    + - add CORS support
    +
    +2013-08-27
    + - fixed some reported issues (see github)
    + - (api change) deprecated use of WriteError; use WriteErrorString instead
    +
    +2014-04-15
    + - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
    +
    +2013-08-08
    + - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
    + - (api add) the swagger package has be extended to have a UI per container.
    + - if panic is detected then a small stack trace is printed (thanks to runner-mei)
    + - (api add) WriteErrorString to Response
    +
    +Important API changes:
    +
    + - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
    + - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
    + 
    + 
    +2013-07-06
    +
    + - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
    +
    +2013-06-19
    +
    + - (improve) DoNotRecover option, moved request body closer, improved ReadEntity
    +
    +2013-06-03
    +
    + - (api change) removed Dispatcher interface, hide PathExpression
    + - changed receiver names of type functions to be more idiomatic Go
    +
    +2013-06-02
    +
    + - (optimize) Cache the RegExp compilation of Paths.
    +
    +2013-05-22
    +	
    + - (api add) Added support for request/response filter functions
    +
    +2013-05-18
    +
    +
    + - (api add) Added feature to change the default Http Request Dispatch function (travis cline)
    + - (api change) Moved Swagger Webservice to swagger package (see example restful-user)
    +
    +[2012-11-14 .. 2013-05-18>
    + 
    + - See https://github.com/emicklei/go-restful/commits
    +
    +2012-11-14
    +
    + - Initial commit
    +
    +
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/LICENSE b/src/prometheus/vendor/github.com/emicklei/go-restful/LICENSE
    new file mode 100644
    index 0000000..ece7ec6
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/LICENSE
    @@ -0,0 +1,22 @@
    +Copyright (c) 2012,2013 Ernest Micklei
    +
    +MIT License
    +
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of this software and associated documentation files (the
    +"Software"), to deal in the Software without restriction, including
    +without limitation the rights to use, copy, modify, merge, publish,
    +distribute, sublicense, and/or sell copies of the Software, and to
    +permit persons to whom the Software is furnished to do so, subject to
    +the following conditions:
    +
    +The above copyright notice and this permission notice shall be
    +included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    \ No newline at end of file
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/Srcfile b/src/prometheus/vendor/github.com/emicklei/go-restful/Srcfile
    new file mode 100644
    index 0000000..16fd186
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/Srcfile
    @@ -0,0 +1 @@
    +{"SkipDirs": ["examples"]}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/compress.go b/src/prometheus/vendor/github.com/emicklei/go-restful/compress.go
    new file mode 100644
    index 0000000..220b377
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/compress.go
    @@ -0,0 +1,123 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bufio"
    +	"compress/gzip"
    +	"compress/zlib"
    +	"errors"
    +	"io"
    +	"net"
    +	"net/http"
    +	"strings"
    +)
    +
    +// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
    +var EnableContentEncoding = false
    +
    +// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
    +type CompressingResponseWriter struct {
    +	writer     http.ResponseWriter
    +	compressor io.WriteCloser
    +	encoding   string
    +}
    +
    +// Header is part of http.ResponseWriter interface
    +func (c *CompressingResponseWriter) Header() http.Header {
    +	return c.writer.Header()
    +}
    +
    +// WriteHeader is part of http.ResponseWriter interface
    +func (c *CompressingResponseWriter) WriteHeader(status int) {
    +	c.writer.WriteHeader(status)
    +}
    +
    +// Write is part of http.ResponseWriter interface
    +// It is passed through the compressor
    +func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
    +	if c.isCompressorClosed() {
    +		return -1, errors.New("Compressing error: tried to write data using closed compressor")
    +	}
    +	return c.compressor.Write(bytes)
    +}
    +
    +// CloseNotify is part of http.CloseNotifier interface
    +func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
    +	return c.writer.(http.CloseNotifier).CloseNotify()
    +}
    +
    +// Close the underlying compressor
    +func (c *CompressingResponseWriter) Close() error {
    +	if c.isCompressorClosed() {
    +		return errors.New("Compressing error: tried to close already closed compressor")
    +	}
    +
    +	c.compressor.Close()
    +	if ENCODING_GZIP == c.encoding {
    +		currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
    +	}
    +	if ENCODING_DEFLATE == c.encoding {
    +		currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
    +	}
    +	// gc hint needed?
    +	c.compressor = nil
    +	return nil
    +}
    +
    +func (c *CompressingResponseWriter) isCompressorClosed() bool {
    +	return nil == c.compressor
    +}
    +
    +// Hijack implements the Hijacker interface
    +// This is especially useful when combining Container.EnabledContentEncoding
    +// in combination with websockets (for instance gorilla/websocket)
    +func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
    +	hijacker, ok := c.writer.(http.Hijacker)
    +	if !ok {
    +		return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
    +	}
    +	return hijacker.Hijack()
    +}
    +
+// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
    +func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
    +	header := httpRequest.Header.Get(HEADER_AcceptEncoding)
    +	gi := strings.Index(header, ENCODING_GZIP)
    +	zi := strings.Index(header, ENCODING_DEFLATE)
    +	// use in order of appearance
    +	if gi == -1 {
    +		return zi != -1, ENCODING_DEFLATE
    +	} else if zi == -1 {
    +		return gi != -1, ENCODING_GZIP
    +	} else {
    +		if gi < zi {
    +			return true, ENCODING_GZIP
    +		}
    +		return true, ENCODING_DEFLATE
    +	}
    +}
    +
+// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
    +func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
    +	httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
    +	c := new(CompressingResponseWriter)
    +	c.writer = httpWriter
    +	var err error
    +	if ENCODING_GZIP == encoding {
    +		w := currentCompressorProvider.AcquireGzipWriter()
    +		w.Reset(httpWriter)
    +		c.compressor = w
    +		c.encoding = ENCODING_GZIP
    +	} else if ENCODING_DEFLATE == encoding {
    +		w := currentCompressorProvider.AcquireZlibWriter()
    +		w.Reset(httpWriter)
    +		c.compressor = w
    +		c.encoding = ENCODING_DEFLATE
    +	} else {
    +		return nil, errors.New("Unknown encoding:" + encoding)
    +	}
    +	return c, err
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/compressor_cache.go b/src/prometheus/vendor/github.com/emicklei/go-restful/compressor_cache.go
    new file mode 100644
    index 0000000..ee42601
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/compressor_cache.go
    @@ -0,0 +1,103 @@
    +package restful
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"compress/gzip"
    +	"compress/zlib"
    +)
    +
    +// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
    +// of writers and readers (resources).
    +// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
    +type BoundedCachedCompressors struct {
    +	gzipWriters     chan *gzip.Writer
    +	gzipReaders     chan *gzip.Reader
    +	zlibWriters     chan *zlib.Writer
    +	writersCapacity int
    +	readersCapacity int
    +}
    +
+// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a filled cache.
    +func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
    +	b := &BoundedCachedCompressors{
    +		gzipWriters:     make(chan *gzip.Writer, writersCapacity),
    +		gzipReaders:     make(chan *gzip.Reader, readersCapacity),
    +		zlibWriters:     make(chan *zlib.Writer, writersCapacity),
    +		writersCapacity: writersCapacity,
    +		readersCapacity: readersCapacity,
    +	}
    +	for ix := 0; ix < writersCapacity; ix++ {
    +		b.gzipWriters <- newGzipWriter()
    +		b.zlibWriters <- newZlibWriter()
    +	}
    +	for ix := 0; ix < readersCapacity; ix++ {
    +		b.gzipReaders <- newGzipReader()
    +	}
    +	return b
    +}
    +
+// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
    +func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
    +	var writer *gzip.Writer
    +	select {
    +	case writer, _ = <-b.gzipWriters:
    +	default:
    +		// return a new unmanaged one
    +		writer = newGzipWriter()
    +	}
    +	return writer
    +}
    +
    +// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
    +// only when the cache has room for it. It will ignore it otherwise.
    +func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
    +	// forget the unmanaged ones
    +	if len(b.gzipWriters) < b.writersCapacity {
    +		b.gzipWriters <- w
    +	}
    +}
    +
    +// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
    +func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
    +	var reader *gzip.Reader
    +	select {
    +	case reader, _ = <-b.gzipReaders:
    +	default:
    +		// return a new unmanaged one
    +		reader = newGzipReader()
    +	}
    +	return reader
    +}
    +
    +// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
    +// only when the cache has room for it. It will ignore it otherwise.
    +func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
    +	// forget the unmanaged ones
    +	if len(b.gzipReaders) < b.readersCapacity {
    +		b.gzipReaders <- r
    +	}
    +}
    +
+// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
    +func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
    +	var writer *zlib.Writer
    +	select {
    +	case writer, _ = <-b.zlibWriters:
    +	default:
    +		// return a new unmanaged one
    +		writer = newZlibWriter()
    +	}
    +	return writer
    +}
    +
    +// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
    +// only when the cache has room for it. It will ignore it otherwise.
    +func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
    +	// forget the unmanaged ones
    +	if len(b.zlibWriters) < b.writersCapacity {
    +		b.zlibWriters <- w
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/compressor_pools.go b/src/prometheus/vendor/github.com/emicklei/go-restful/compressor_pools.go
    new file mode 100644
    index 0000000..d866ce6
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/compressor_pools.go
    @@ -0,0 +1,91 @@
    +package restful
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"compress/gzip"
    +	"compress/zlib"
    +	"sync"
    +)
    +
+// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
    +type SyncPoolCompessors struct {
    +	GzipWriterPool *sync.Pool
    +	GzipReaderPool *sync.Pool
    +	ZlibWriterPool *sync.Pool
    +}
    +
    +// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
    +func NewSyncPoolCompessors() *SyncPoolCompessors {
    +	return &SyncPoolCompessors{
    +		GzipWriterPool: &sync.Pool{
    +			New: func() interface{} { return newGzipWriter() },
    +		},
    +		GzipReaderPool: &sync.Pool{
    +			New: func() interface{} { return newGzipReader() },
    +		},
    +		ZlibWriterPool: &sync.Pool{
    +			New: func() interface{} { return newZlibWriter() },
    +		},
    +	}
    +}
    +
    +func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
    +	return s.GzipWriterPool.Get().(*gzip.Writer)
    +}
    +
    +func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
    +	s.GzipWriterPool.Put(w)
    +}
    +
    +func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
    +	return s.GzipReaderPool.Get().(*gzip.Reader)
    +}
    +
    +func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
    +	s.GzipReaderPool.Put(r)
    +}
    +
    +func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
    +	return s.ZlibWriterPool.Get().(*zlib.Writer)
    +}
    +
    +func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
    +	s.ZlibWriterPool.Put(w)
    +}
    +
    +func newGzipWriter() *gzip.Writer {
    +	// create with an empty bytes writer; it will be replaced before using the gzipWriter
    +	writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
    +	if err != nil {
    +		panic(err.Error())
    +	}
    +	return writer
    +}
    +
    +func newGzipReader() *gzip.Reader {
    +	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
    +	// we can safely use currentCompressProvider because it is set on package initialization.
    +	w := currentCompressorProvider.AcquireGzipWriter()
    +	defer currentCompressorProvider.ReleaseGzipWriter(w)
    +	b := new(bytes.Buffer)
    +	w.Reset(b)
    +	w.Flush()
    +	w.Close()
    +	reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
    +	if err != nil {
    +		panic(err.Error())
    +	}
    +	return reader
    +}
    +
    +func newZlibWriter() *zlib.Writer {
    +	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
    +	if err != nil {
    +		panic(err.Error())
    +	}
    +	return writer
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/compressors.go b/src/prometheus/vendor/github.com/emicklei/go-restful/compressors.go
    new file mode 100644
    index 0000000..f028456
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/compressors.go
    @@ -0,0 +1,53 @@
    +package restful
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"compress/gzip"
    +	"compress/zlib"
    +)
    +
    +type CompressorProvider interface {
    +	// Returns a *gzip.Writer which needs to be released later.
    +	// Before using it, call Reset().
    +	AcquireGzipWriter() *gzip.Writer
    +
+	// Releases an acquired *gzip.Writer.
    +	ReleaseGzipWriter(w *gzip.Writer)
    +
    +	// Returns a *gzip.Reader which needs to be released later.
    +	AcquireGzipReader() *gzip.Reader
    +
+	// Releases an acquired *gzip.Reader.
    +	ReleaseGzipReader(w *gzip.Reader)
    +
    +	// Returns a *zlib.Writer which needs to be released later.
    +	// Before using it, call Reset().
    +	AcquireZlibWriter() *zlib.Writer
    +
+	// Releases an acquired *zlib.Writer.
    +	ReleaseZlibWriter(w *zlib.Writer)
    +}
    +
+// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
    +var currentCompressorProvider CompressorProvider
    +
    +func init() {
    +	currentCompressorProvider = NewSyncPoolCompessors()
    +}
    +
    +// CurrentCompressorProvider returns the current CompressorProvider.
    +// It is initialized using a SyncPoolCompessors.
    +func CurrentCompressorProvider() CompressorProvider {
    +	return currentCompressorProvider
    +}
    +
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
    +func SetCompressorProvider(p CompressorProvider) {
    +	if p == nil {
    +		panic("cannot set compressor provider to nil")
    +	}
    +	currentCompressorProvider = p
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/constants.go b/src/prometheus/vendor/github.com/emicklei/go-restful/constants.go
    new file mode 100644
    index 0000000..203439c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/constants.go
    @@ -0,0 +1,30 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +const (
    +	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
    +	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
    +	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
    +
    +	HEADER_Allow                         = "Allow"
    +	HEADER_Accept                        = "Accept"
    +	HEADER_Origin                        = "Origin"
    +	HEADER_ContentType                   = "Content-Type"
    +	HEADER_LastModified                  = "Last-Modified"
    +	HEADER_AcceptEncoding                = "Accept-Encoding"
    +	HEADER_ContentEncoding               = "Content-Encoding"
    +	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
    +	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
    +	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
    +	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
    +	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
    +	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
    +	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
    +	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"
    +
    +	ENCODING_GZIP    = "gzip"
    +	ENCODING_DEFLATE = "deflate"
    +)
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/container.go b/src/prometheus/vendor/github.com/emicklei/go-restful/container.go
    new file mode 100644
    index 0000000..4e53ccc
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/container.go
    @@ -0,0 +1,361 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"errors"
    +	"fmt"
    +	"net/http"
    +	"os"
    +	"runtime"
    +	"strings"
    +	"sync"
    +
    +	"github.com/emicklei/go-restful/log"
    +)
    +
    +// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
    +// The requests are further dispatched to routes of WebServices using a RouteSelector
    +type Container struct {
    +	webServicesLock        sync.RWMutex
    +	webServices            []*WebService
    +	ServeMux               *http.ServeMux
    +	isRegisteredOnRoot     bool
    +	containerFilters       []FilterFunction
    +	doNotRecover           bool // default is false
    +	recoverHandleFunc      RecoverHandleFunction
    +	serviceErrorHandleFunc ServiceErrorHandleFunction
    +	router                 RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
    +	contentEncodingEnabled bool          // default is false
    +}
    +
    +// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
    +func NewContainer() *Container {
    +	return &Container{
    +		webServices:            []*WebService{},
    +		ServeMux:               http.NewServeMux(),
    +		isRegisteredOnRoot:     false,
    +		containerFilters:       []FilterFunction{},
    +		doNotRecover:           false,
    +		recoverHandleFunc:      logStackOnRecover,
    +		serviceErrorHandleFunc: writeServiceError,
    +		router:                 RouterJSR311{},
    +		contentEncodingEnabled: false}
    +}
    +
    +// RecoverHandleFunction declares functions that can be used to handle a panic situation.
    +// The first argument is what recover() returns. The second must be used to communicate an error response.
    +type RecoverHandleFunction func(interface{}, http.ResponseWriter)
    +
    +// RecoverHandler changes the default function (logStackOnRecover) to be called
    +// when a panic is detected. DoNotRecover must be have its default value (=false).
    +func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
    +	c.recoverHandleFunc = handler
    +}
    +
    +// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
    +// The first argument is the service error, the second is the request that resulted in the error and
    +// the third must be used to communicate an error response.
    +type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
    +
    +// ServiceErrorHandler changes the default function (writeServiceError) to be called
    +// when a ServiceError is detected.
    +func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
    +	c.serviceErrorHandleFunc = handler
    +}
    +
    +// DoNotRecover controls whether panics will be caught to return HTTP 500.
    +// If set to true, Route functions are responsible for handling any error situation.
    +// Default value is false = recover from panics. This has performance implications.
    +func (c *Container) DoNotRecover(doNot bool) {
    +	c.doNotRecover = doNot
    +}
    +
    +// Router changes the default Router (currently RouterJSR311)
    +func (c *Container) Router(aRouter RouteSelector) {
    +	c.router = aRouter
    +}
    +
    +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
    +func (c *Container) EnableContentEncoding(enabled bool) {
    +	c.contentEncodingEnabled = enabled
    +}
    +
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
// Registration on the ServeMux happens at most once, on the first service
// whose prefix resolves to root ("/"); dispatch then routes by path matching.
func (c *Container) Add(service *WebService) *Container {
	c.webServicesLock.Lock()
	defer c.webServicesLock.Unlock()

	// if rootPath was not set then lazy initialize it
	if len(service.rootPath) == 0 {
		service.Path("/")
	}

	// cannot have duplicate root paths; this is a fatal configuration error
	for _, each := range c.webServices {
		if each.RootPath() == service.RootPath() {
			log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
			os.Exit(1)
		}
	}

	// If not registered on root then add specific mapping
	if !c.isRegisteredOnRoot {
		c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
	}
	c.webServices = append(c.webServices, service)
	// returning the Container allows chained Add calls
	return c
}
    +
    +// addHandler may set a new HandleFunc for the serveMux
    +// this function must run inside the critical region protected by the webServicesLock.
    +// returns true if the function was registered on root ("/")
    +func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
    +	pattern := fixedPrefixPath(service.RootPath())
    +	// check if root path registration is needed
    +	if "/" == pattern || "" == pattern {
    +		serveMux.HandleFunc("/", c.dispatch)
    +		return true
    +	}
    +	// detect if registration already exists
    +	alreadyMapped := false
    +	for _, each := range c.webServices {
    +		if each.RootPath() == service.RootPath() {
    +			alreadyMapped = true
    +			break
    +		}
    +	}
    +	if !alreadyMapped {
    +		serveMux.HandleFunc(pattern, c.dispatch)
    +		if !strings.HasSuffix(pattern, "/") {
    +			serveMux.HandleFunc(pattern+"/", c.dispatch)
    +		}
    +	}
    +	return false
    +}
    +
    +func (c *Container) Remove(ws *WebService) error {
    +	if c.ServeMux == http.DefaultServeMux {
    +		errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
    +		log.Printf(errMsg)
    +		return errors.New(errMsg)
    +	}
    +	c.webServicesLock.Lock()
    +	defer c.webServicesLock.Unlock()
    +	// build a new ServeMux and re-register all WebServices
    +	newServeMux := http.NewServeMux()
    +	newServices := []*WebService{}
    +	newIsRegisteredOnRoot := false
    +	for _, each := range c.webServices {
    +		if each.rootPath != ws.rootPath {
    +			// If not registered on root then add specific mapping
    +			if !newIsRegisteredOnRoot {
    +				newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
    +			}
    +			newServices = append(newServices, each)
    +		}
    +	}
    +	c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
    +	return nil
    +}
    +
// logStackOnRecover is the default RecoverHandleFunction and is called
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
// Default implementation logs the stacktrace and writes the stacktrace on the response.
// This may be a security issue as it exposes sourcecode information.
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
	// walk the call stack; start at 2 to skip this function and the recovery frame
	for i := 2; ; i += 1 {
		_, file, line, ok := runtime.Caller(i)
		if !ok {
			break
		}
		buffer.WriteString(fmt.Sprintf("    %s:%d\r\n", file, line))
	}
	log.Print(buffer.String())
	// the status code must be written before the body
	httpWriter.WriteHeader(http.StatusInternalServerError)
	httpWriter.Write(buffer.Bytes())
}
    +
// writeServiceError is the default ServiceErrorHandleFunction and is called
// when a ServiceError is returned during route selection. Default implementation
// calls resp.WriteErrorString(err.Code, err.Message), i.e. the error's HTTP
// status code and its message become the response.
func writeServiceError(err ServiceError, req *Request, resp *Response) {
	resp.WriteErrorString(err.Code, err.Message)
}
    +
// Dispatch the incoming Http Request to a matching WebService.
// Flow: (optional) panic recovery -> request-body close -> (optional) content
// encoding -> route selection under a read lock -> filter chains (container,
// service, route) -> the route's Function. When route selection fails, only
// the container filters run, ending in serviceErrorHandleFunc.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
	// writer may later be swapped for a CompressingResponseWriter wrapper
	writer := httpWriter

	// CompressingResponseWriter should be closed after all operations are done
	defer func() {
		if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
			compressWriter.Close()
		}
	}()

	// Install panic recovery unless told otherwise
	if !c.doNotRecover { // catch all for 500 response
		defer func() {
			if r := recover(); r != nil {
				c.recoverHandleFunc(r, writer)
				return
			}
		}()
	}
	// Install closing the request body (if any)
	defer func() {
		if nil != httpRequest.Body {
			httpRequest.Body.Close()
		}
	}()

	// Detect if compression is needed
	// assume without compression, test for override
	if c.contentEncodingEnabled {
		doCompress, encoding := wantsCompressedResponse(httpRequest)
		if doCompress {
			var err error
			writer, err = NewCompressingResponseWriter(httpWriter, encoding)
			if err != nil {
				log.Print("[restful] unable to install compressor: ", err)
				httpWriter.WriteHeader(http.StatusInternalServerError)
				return
			}
		}
	}
	// Find best match Route ; err is non nil if no match was found
	var webService *WebService
	var route *Route
	var err error
	func() {
		// hold the read lock only for route selection, not for the handler call
		c.webServicesLock.RLock()
		defer c.webServicesLock.RUnlock()
		webService, route, err = c.router.SelectRoute(
			c.webServices,
			httpRequest)
	}()
	if err != nil {
		// a non-200 response has already been written
		// run container filters anyway ; they should not touch the response...
		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
			switch err.(type) {
			case ServiceError:
				ser := err.(ServiceError)
				c.serviceErrorHandleFunc(ser, req, resp)
			}
			// TODO
		}}
		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
		return
	}
	wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
	// pass through filters (if any)
	if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
		// compose filter chain: container filters first, then service, then route
		allFilters := []FilterFunction{}
		allFilters = append(allFilters, c.containerFilters...)
		allFilters = append(allFilters, webService.filters...)
		allFilters = append(allFilters, route.Filters...)
		chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
			// handle request by route after passing all filters
			route.Function(wrappedRequest, wrappedResponse)
		}}
		chain.ProcessFilter(wrappedRequest, wrappedResponse)
	} else {
		// no filters, handle request by route
		route.Function(wrappedRequest, wrappedResponse)
	}
}
    +
    +// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {}
    +func fixedPrefixPath(pathspec string) string {
    +	varBegin := strings.Index(pathspec, "{")
    +	if -1 == varBegin {
    +		return pathspec
    +	}
    +	return pathspec[:varBegin]
    +}
    +
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server.
// It delegates directly to the Container's ServeMux.
func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
	c.ServeMux.ServeHTTP(httpwriter, httpRequest)
}
    +
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
// The handler bypasses the container filter chain; use HandleWithFilter to run the filters.
func (c *Container) Handle(pattern string, handler http.Handler) {
	c.ServeMux.Handle(pattern, handler)
}
    +
    +// HandleWithFilter registers the handler for the given pattern.
    +// Container's filter chain is applied for handler.
    +// If a handler already exists for pattern, HandleWithFilter panics.
    +func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
    +	f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
    +		if len(c.containerFilters) == 0 {
    +			handler.ServeHTTP(httpResponse, httpRequest)
    +			return
    +		}
    +
    +		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
    +			handler.ServeHTTP(httpResponse, httpRequest)
    +		}}
    +		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
    +	}
    +
    +	c.Handle(pattern, http.HandlerFunc(f))
    +}
    +
// Filter appends a container FilterFunction. These are called before dispatching
// a http.Request to a WebService from the container (see dispatch).
func (c *Container) Filter(filter FilterFunction) {
	c.containerFilters = append(c.containerFilters, filter)
}
    +
    +// RegisteredWebServices returns the collections of added WebServices
    +func (c *Container) RegisteredWebServices() []*WebService {
    +	c.webServicesLock.RLock()
    +	defer c.webServicesLock.RUnlock()
    +	result := make([]*WebService, len(c.webServices))
    +	for ix := range c.webServices {
    +		result[ix] = c.webServices[ix]
    +	}
    +	return result
    +}
    +
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
// (used to build CORS preflight and OPTIONS responses).
func (c *Container) computeAllowedMethods(req *Request) []string {
	// Go through all RegisteredWebServices() and all its Routes to collect the options
	methods := []string{}
	requestPath := req.Request.URL.Path
	for _, ws := range c.RegisteredWebServices() {
		matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
		if matches != nil {
			// the final capture group holds whatever part of the path the
			// service expression did not consume
			finalMatch := matches[len(matches)-1]
			for _, rt := range ws.Routes() {
				matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
				if matches != nil {
					lastMatch := matches[len(matches)-1]
					if lastMatch == "" || lastMatch == "/" { // include only when the remainder is empty or "/", i.e. the route consumed the whole path
						methods = append(methods, rt.Method)
					}
				}
			}
		}
	}
	// methods = append(methods, "OPTIONS")  not sure about this
	return methods
}
    +
    +// newBasicRequestResponse creates a pair of Request,Response from its http versions.
    +// It is basic because no parameter or (produces) content-type information is given.
    +func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
    +	resp := NewResponse(httpWriter)
    +	resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
    +	return NewRequest(httpRequest), resp
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/cors_filter.go b/src/prometheus/vendor/github.com/emicklei/go-restful/cors_filter.go
    new file mode 100644
    index 0000000..1efeef0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/cors_filter.go
    @@ -0,0 +1,202 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"regexp"
    +	"strconv"
    +	"strings"
    +)
    +
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
//
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
// http://enable-cors.org/server.html
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
type CrossOriginResourceSharing struct {
	ExposeHeaders  []string // list of Header names
	AllowedHeaders []string // list of Header names
	AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
	AllowedMethods []string // allowed HTTP methods; when empty it is computed lazily from the container's routes (see doPreflightRequest)
	MaxAge         int      // number of seconds before requiring new Options request
	CookiesAllowed bool     // when true, Access-Control-Allow-Credentials is sent
	Container      *Container // container whose routes determine the allowed methods; nil means DefaultContainer

	allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
}
    +
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
	origin := req.Request.Header.Get(HEADER_Origin)
	if len(origin) == 0 {
		// not a CORS request; continue the chain untouched
		if trace {
			traceLogger.Print("no Http header Origin set")
		}
		chain.ProcessFilter(req, resp)
		return
	}
	if !c.isOriginAllowed(origin) { // check whether this origin is allowed
		// disallowed origin: continue without CORS headers (the browser will reject the response)
		if trace {
			traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
		}
		chain.ProcessFilter(req, resp)
		return
	}
	if req.Request.Method != "OPTIONS" {
		// simple/actual request: add CORS headers, then continue the chain
		c.doActualRequest(req, resp)
		chain.ProcessFilter(req, resp)
		return
	}
	if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
		// preflight request: answer with CORS headers only; the chain is
		// intentionally NOT continued (preflight ends here, empty 200 body)
		c.doPreflightRequest(req, resp)
	} else {
		// plain OPTIONS without Access-Control-Request-Method: treat as an actual request
		c.doActualRequest(req, resp)
		chain.ProcessFilter(req, resp)
		return
	}
}
    +
// doActualRequest handles a non-preflight CORS request by adding the CORS
// response headers; the caller continues the filter chain afterwards.
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
	c.setOptionsHeaders(req, resp)
	// continue processing the response
}
    +
// doPreflightRequest answers a CORS preflight (OPTIONS with an
// Access-Control-Request-Method header). It validates the requested method and
// headers against the allowed sets and, when valid, writes the
// Access-Control-Allow-* headers. An invalid preflight returns without CORS
// headers, which causes the browser to reject the cross-origin call.
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
	if len(c.AllowedMethods) == 0 {
		// lazily derive the allowed methods from the routes registered on the
		// container (pointer receiver: the result is cached on c)
		if c.Container == nil {
			c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
		} else {
			c.AllowedMethods = c.Container.computeAllowedMethods(req)
		}
	}

	acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
	if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
		if trace {
			traceLogger.Printf("Http header %s:%s is not in %v",
				HEADER_AccessControlRequestMethod,
				acrm,
				c.AllowedMethods)
		}
		return
	}
	// every header in the comma-separated request list must be allowed
	acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
	if len(acrhs) > 0 {
		for _, each := range strings.Split(acrhs, ",") {
			if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
				if trace {
					traceLogger.Printf("Http header %s:%s is not in %v",
						HEADER_AccessControlRequestHeaders,
						acrhs,
						c.AllowedHeaders)
				}
				return
			}
		}
	}
	resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
	resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
	c.setOptionsHeaders(req, resp)

	// return http 200 response, no body
}
    +
// setOptionsHeaders writes the CORS headers common to preflight and actual
// requests: Expose-Headers, Allow-Origin, Allow-Credentials and, when MaxAge
// is positive, Access-Control-Max-Age.
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
	c.checkAndSetExposeHeaders(resp)
	c.setAllowOriginHeader(req, resp)
	c.checkAndSetAllowCredentials(resp)
	if c.MaxAge > 0 {
		resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
	}
}
    +
// isOriginAllowed reports whether origin may access this service.
// An empty AllowedDomains list allows every origin; otherwise the origin must
// equal one of the entries exactly, or match one of them as a regular expression.
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
	if len(origin) == 0 {
		return false
	}
	if len(c.AllowedDomains) == 0 {
		return true
	}

	// first try cheap exact string comparison
	allowed := false
	for _, domain := range c.AllowedDomains {
		if domain == origin {
			allowed = true
			break
		}
	}

	if !allowed {
		if len(c.allowedOriginPatterns) == 0 {
			// compile allowed domains to allowed origin patterns
			// NOTE(review): value receiver — this cache assignment mutates a copy
			// of c and is discarded when the method returns, so patterns appear
			// to be recompiled on every request; confirm whether a pointer
			// receiver was intended here.
			allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
			if err != nil {
				return false
			}
			c.allowedOriginPatterns = allowedOriginRegexps
		}

		for _, pattern := range c.allowedOriginPatterns {
			if allowed = pattern.MatchString(origin); allowed {
				break
			}
		}
	}

	return allowed
}
    +
    +func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
    +	origin := req.Request.Header.Get(HEADER_Origin)
    +	if c.isOriginAllowed(origin) {
    +		resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
    +	}
    +}
    +
    +func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
    +	if len(c.ExposeHeaders) > 0 {
    +		resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
    +	}
    +}
    +
    +func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
    +	if c.CookiesAllowed {
    +		resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
    +	}
    +}
    +
    +func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
    +	for _, each := range allowedMethods {
    +		if each == method {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
    +	for _, each := range c.AllowedHeaders {
    +		if strings.ToLower(each) == strings.ToLower(header) {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// Take a list of strings and compile them into a list of regular expressions.
    +func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
    +	regexps := []*regexp.Regexp{}
    +	for _, regexpStr := range regexpStrings {
    +		r, err := regexp.Compile(regexpStr)
    +		if err != nil {
    +			return regexps, err
    +		}
    +		regexps = append(regexps, r)
    +	}
    +	return regexps, nil
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/curly.go b/src/prometheus/vendor/github.com/emicklei/go-restful/curly.go
    new file mode 100644
    index 0000000..185300d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/curly.go
    @@ -0,0 +1,162 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"net/http"
    +	"regexp"
    +	"sort"
    +	"strings"
    +)
    +
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
// It is the lighter-weight alternative to RouterJSR311 (see doc.go, "Performance options").
type CurlyRouter struct{}
    +
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
// Selection runs in two stages: first the WebService with the highest path
// score (detectWebService), then the best Route within it. When no match
// exists, a nil Route and a non-nil error describing the failure are returned.
func (c CurlyRouter) SelectRoute(
	webServices []*WebService,
	httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {

	requestTokens := tokenizePath(httpRequest.URL.Path)

	detectedService := c.detectWebService(requestTokens, webServices)
	if detectedService == nil {
		if trace {
			traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
		}
		return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
	}
	candidateRoutes := c.selectRoutes(detectedService, requestTokens)
	if len(candidateRoutes) == 0 {
		if trace {
			traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
		}
		return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
	}
	selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
	if selectedRoute == nil {
		// err explains why no candidate was acceptable (set by detectRoute)
		return detectedService, nil, err
	}
	return detectedService, selectedRoute, nil
}
    +
    +// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
    +func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
    +	candidates := sortableCurlyRoutes{}
    +	for _, each := range ws.routes {
    +		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
    +		if matches {
    +			candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
    +		}
    +	}
    +	sort.Sort(sort.Reverse(candidates))
    +	return candidates
    +}
    +
// matchesRouteByPathTokens computes whether the route tokens match the request
// tokens and, if so, how many parameter tokens and how many static tokens matched.
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
	if len(routeTokens) < len(requestTokens) {
		// proceed in matching only if last routeToken is wildcard
		count := len(routeTokens)
		if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
			return false, 0, 0
		}
		// proceed
	}
	for i, routeToken := range routeTokens {
		if i == len(requestTokens) {
			// reached end of request path
			return false, 0, 0
		}
		requestToken := requestTokens[i]
		if strings.HasPrefix(routeToken, "{") {
			// a path parameter, e.g. {id} or {id:regexp}
			paramCount++
			if colon := strings.Index(routeToken, ":"); colon != -1 {
				// match by regex
				matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
				if !matchesToken {
					return false, 0, 0
				}
				if matchesRemainder {
					// a "*" wildcard consumed the rest of the request path
					break
				}
			}
		} else { // no { prefix
			if requestToken != routeToken {
				return false, 0, 0
			}
			staticCount++
		}
	}
	return true, paramCount, staticCount
}
    +
    +// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
    +// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
    +func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
    +	regPart := routeToken[colon+1 : len(routeToken)-1]
    +	if regPart == "*" {
    +		if trace {
    +			traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
    +		}
    +		return true, true
    +	}
    +	matched, err := regexp.MatchString(regPart, requestToken)
    +	return (matched && err == nil), false
    +}
    +
// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go:
// the header-based selection logic is delegated to RouterJSR311.
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
	// tracing is done inside detectRoute
	return RouterJSR311{}.detectRoute(candidateRoutes.routes(), httpRequest)
}
    +
    +// detectWebService returns the best matching webService given the list of path tokens.
    +// see also computeWebserviceScore
    +func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
    +	var best *WebService
    +	score := -1
    +	for _, each := range webServices {
    +		matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
    +		if matches && (eachScore > score) {
    +			best = each
    +			score = eachScore
    +		}
    +	}
    +	return best
    +}
    +
    +// computeWebserviceScore returns whether tokens match and
    +// the weighted score of the longest matching consecutive tokens from the beginning.
    +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
    +	if len(tokens) > len(requestTokens) {
    +		return false, 0
    +	}
    +	score := 0
    +	for i := 0; i < len(tokens); i++ {
    +		each := requestTokens[i]
    +		other := tokens[i]
    +		if len(each) == 0 && len(other) == 0 {
    +			score++
    +			continue
    +		}
    +		if len(other) > 0 && strings.HasPrefix(other, "{") {
    +			// no empty match
    +			if len(each) == 0 {
    +				return false, score
    +			}
    +			score += 1
    +		} else {
    +			// not a parameter
    +			if each != other {
    +				return false, score
    +			}
    +			score += (len(tokens) - i) * 10 //fuzzy
    +		}
    +	}
    +	return true, score
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/curly_route.go b/src/prometheus/vendor/github.com/emicklei/go-restful/curly_route.go
    new file mode 100644
    index 0000000..296f946
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/curly_route.go
    @@ -0,0 +1,52 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
	route       Route // the candidate route
	paramCount  int   // number of matched {parameter} tokens
	staticCount int   // number of exactly-matched static tokens
}
    +
// sortableCurlyRoutes is a sortable collection of route candidates
// used by CurlyRouter to rank matches.
type sortableCurlyRoutes []curlyRoute

// add appends a candidate route to the collection.
func (s *sortableCurlyRoutes) add(route curlyRoute) {
	*s = append(*s, route)
}

// routes unwraps the candidates back to plain Routes, preserving order.
func (s sortableCurlyRoutes) routes() (routes []Route) {
	for _, each := range s {
		routes = append(routes, each.route) // TODO change return type
	}
	return routes
}
    +
    +func (s sortableCurlyRoutes) Len() int {
    +	return len(s)
    +}
    +func (s sortableCurlyRoutes) Swap(i, j int) {
    +	s[i], s[j] = s[j], s[i]
    +}
    +func (s sortableCurlyRoutes) Less(i, j int) bool {
    +	ci := s[i]
    +	cj := s[j]
    +
    +	// primary key
    +	if ci.staticCount < cj.staticCount {
    +		return true
    +	}
    +	if ci.staticCount > cj.staticCount {
    +		return false
    +	}
    +	// secundary key
    +	if ci.paramCount < cj.paramCount {
    +		return true
    +	}
    +	if ci.paramCount > cj.paramCount {
    +		return false
    +	}
    +	return ci.route.Path < cj.route.Path
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/doc.go b/src/prometheus/vendor/github.com/emicklei/go-restful/doc.go
    new file mode 100644
    index 0000000..d40405b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/doc.go
    @@ -0,0 +1,196 @@
    +/*
    +Package restful, a lean package for creating REST-style WebServices without magic.
    +
    +WebServices and Routes
    +
+A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
    +Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
+WebServices must be added to a container (see below) in order to handle Http requests from a server.
    +
    +A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
    +This package has the logic to find the best matching Route and if found, call its Function.
    +
    +	ws := new(restful.WebService)
    +	ws.
    +		Path("/users").
    +		Consumes(restful.MIME_JSON, restful.MIME_XML).
    +		Produces(restful.MIME_JSON, restful.MIME_XML)
    +
    +	ws.Route(ws.GET("/{user-id}").To(u.findUser))  // u is a UserResource
    +
    +	...
    +
    +	// GET http://localhost:8080/users/1
    +	func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
    +		id := request.PathParameter("user-id")
    +		...
    +	}
    +
    +The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
    +
    +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
    +
    +Regular expression matching Routes
    +
    +A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
    +For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
    +Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
    +This feature requires the use of a CurlyRouter.
    +
    +Containers
    +
    +A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
    +Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
    +The Default container of go-restful uses the http.DefaultServeMux.
    +You can create your own Container and create a new http.Server for that particular container.
    +
    +	container := restful.NewContainer()
    +	server := &http.Server{Addr: ":8081", Handler: container}
    +
    +Filters
    +
    +A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
    +You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
    +In the restful package there are three hooks into the request,response flow where filters can be added.
    +Each filter must define a FilterFunction:
    +
    +	func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
    +
    +Use the following statement to pass the request,response pair to the next filter or RouteFunction
    +
    +	chain.ProcessFilter(req, resp)
    +
    +Container Filters
    +
    +These are processed before any registered WebService.
    +
    +	// install a (global) filter for the default container (processed before any webservice)
    +	restful.Filter(globalLogging)
    +
    +WebService Filters
    +
    +These are processed before any Route of a WebService.
    +
    +	// install a webservice filter (processed before any route)
    +	ws.Filter(webserviceLogging).Filter(measureTime)
    +
    +
    +Route Filters
    +
    +These are processed before calling the function associated with the Route.
    +
    +	// install 2 chained route filters (processed before calling findUser)
    +	ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
    +
    +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
    +
    +Response Encoding
    +
    +Two encodings are supported: gzip and deflate. To enable this for all responses:
    +
    +	restful.DefaultContainer.EnableContentEncoding(true)
    +
    +If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
    +Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
    +
    +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
    +
    +OPTIONS support
    +
    +By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
    +
    +	Filter(OPTIONSFilter())
    +
    +CORS
    +
    +By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
    +
    +	cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
    +	Filter(cors.Filter)
    +
    +Error Handling
    +
    +Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
    +For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
    +
    +	400: Bad Request
    +
    +If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
    +
    +	404: Not Found
    +
    +Despite a valid URI, the resource requested may not be available
    +
    +	500: Internal Server Error
    +
    +If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
    +
    +	405: Method Not Allowed
    +
    +The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
    +
    +	406: Not Acceptable
    +
    +The request does not have or has an unknown Accept Header set for this operation.
    +
    +	415: Unsupported Media Type
    +
    +The request does not have or has an unknown Content-Type Header set for this operation.
    +
    +ServiceError
    +
    +In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
    +
    +Performance options
    +
    +This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
    +
    +	restful.DefaultContainer.Router(CurlyRouter{})
    +
    +The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
    +However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time.
    +The CurlyRouter implementation is more lightweight and also allows you to use wildcards and expressions, but only if needed.
    +
    +	restful.DefaultContainer.DoNotRecover(true)
    +
    +DoNotRecover controls whether panics will be caught to return HTTP 500.
    +If set to true, Route functions are responsible for handling any error situation.
    +Default value is false; it will recover from panics. This has performance implications.
    +
    +	restful.SetCacheReadEntity(false)
    +
    +SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
    +If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false.
    +
    +	restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
    +
    +If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
    +Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
    +
    +Trouble shooting
    +
    +This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
    +Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as:
    +
    +	restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
    +
    +Logging
    +
    +The restful.SetLogger() method allows you to override the logger used by the package. By default restful
    +uses the standard library `log` package and logs to stdout. Different logging packages are supported as
    +long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
    +preferred package is simple.
    +
    +Resources
    +
    +[project]: https://github.com/emicklei/go-restful
    +
    +[examples]: https://github.com/emicklei/go-restful/blob/master/examples
    +
    +[design]:  http://ernestmicklei.com/2012/11/11/go-restful-api-design/
    +
    +[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
    +
    +(c) 2012-2015, http://ernestmicklei.com. MIT License
    +*/
    +package restful
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/entity_accessors.go b/src/prometheus/vendor/github.com/emicklei/go-restful/entity_accessors.go
    new file mode 100644
    index 0000000..6ecf6c7
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/entity_accessors.go
    @@ -0,0 +1,163 @@
    +package restful
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"encoding/json"
    +	"encoding/xml"
    +	"strings"
    +	"sync"
    +)
    +
    +// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
    +type EntityReaderWriter interface {
    +	// Read a serialized version of the value from the request.
    +	// The Request may have a decompressing reader. Depends on Content-Encoding.
    +	Read(req *Request, v interface{}) error
    +
    +	// Write a serialized version of the value on the response.
    +	// The Response may have a compressing writer. Depends on Accept-Encoding.
    +	// status should be a valid Http Status code
    +	Write(resp *Response, status int, v interface{}) error
    +}
    +
    +// entityAccessRegistry is a singleton
    +var entityAccessRegistry = &entityReaderWriters{
    +	protection: new(sync.RWMutex),
    +	accessors:  map[string]EntityReaderWriter{},
    +}
    +
    +// entityReaderWriters associates MIME to an EntityReaderWriter
    +type entityReaderWriters struct {
    +	protection *sync.RWMutex
    +	accessors  map[string]EntityReaderWriter
    +}
    +
    +func init() {
    +	RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
    +	RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
    +}
    +
    +// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type.
    +func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
    +	entityAccessRegistry.protection.Lock()
    +	defer entityAccessRegistry.protection.Unlock()
    +	entityAccessRegistry.accessors[mime] = erw
    +}
    +
    +// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
    +// This package is already initialized with such an accessor using the MIME_JSON contentType.
    +func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
    +	return entityJSONAccess{ContentType: contentType}
    +}
    +
    +// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
    +// This package is already initialized with such an accessor using the MIME_XML contentType.
    +func NewEntityAccessorXML(contentType string) EntityReaderWriter {
    +	return entityXMLAccess{ContentType: contentType}
    +}
    +
    +// accessorAt returns the registered ReaderWriter for this MIME type.
    +func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
    +	r.protection.RLock()
    +	defer r.protection.RUnlock()
    +	er, ok := r.accessors[mime]
    +	if !ok {
    +		// retry with reverse lookup
    +		// more expensive but we are in an exceptional situation anyway
    +		for k, v := range r.accessors {
    +			if strings.Contains(mime, k) {
    +				return v, true
    +			}
    +		}
    +	}
    +	return er, ok
    +}
    +
    +// entityXMLAccess is a EntityReaderWriter for XML encoding
    +type entityXMLAccess struct {
    +	// This is used for setting the Content-Type header when writing
    +	ContentType string
    +}
    +
    +// Read unmarshalls the value from XML
    +func (e entityXMLAccess) Read(req *Request, v interface{}) error {
    +	return xml.NewDecoder(req.Request.Body).Decode(v)
    +}
    +
    +// Write marshals the value to XML and sets the Content-Type Header.
    +func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
    +	return writeXML(resp, status, e.ContentType, v)
    +}
    +
    +// writeXML marshals the value to XML and sets the Content-Type Header.
    +func writeXML(resp *Response, status int, contentType string, v interface{}) error {
    +	if v == nil {
    +		resp.WriteHeader(status)
    +		// do not write a nil representation
    +		return nil
    +	}
    +	if resp.prettyPrint {
    +		// pretty output must be created and written explicitly
    +		output, err := xml.MarshalIndent(v, " ", " ")
    +		if err != nil {
    +			return err
    +		}
    +		resp.Header().Set(HEADER_ContentType, contentType)
    +		resp.WriteHeader(status)
    +		_, err = resp.Write([]byte(xml.Header))
    +		if err != nil {
    +			return err
    +		}
    +		_, err = resp.Write(output)
    +		return err
    +	}
    +	// not-so-pretty
    +	resp.Header().Set(HEADER_ContentType, contentType)
    +	resp.WriteHeader(status)
    +	return xml.NewEncoder(resp).Encode(v)
    +}
    +
    +// entityJSONAccess is a EntityReaderWriter for JSON encoding
    +type entityJSONAccess struct {
    +	// This is used for setting the Content-Type header when writing
    +	ContentType string
    +}
    +
    +// Read unmarshalls the value from JSON
    +func (e entityJSONAccess) Read(req *Request, v interface{}) error {
    +	decoder := json.NewDecoder(req.Request.Body)
    +	decoder.UseNumber()
    +	return decoder.Decode(v)
    +}
    +
    +// Write marshalls the value to JSON and set the Content-Type Header.
    +func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
    +	return writeJSON(resp, status, e.ContentType, v)
    +}
    +
    +// writeJSON marshals the value to JSON and sets the Content-Type Header.
    +func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
    +	if v == nil {
    +		resp.WriteHeader(status)
    +		// do not write a nil representation
    +		return nil
    +	}
    +	if resp.prettyPrint {
    +		// pretty output must be created and written explicitly
    +		output, err := json.MarshalIndent(v, " ", " ")
    +		if err != nil {
    +			return err
    +		}
    +		resp.Header().Set(HEADER_ContentType, contentType)
    +		resp.WriteHeader(status)
    +		_, err = resp.Write(output)
    +		return err
    +	}
    +	// not-so-pretty
    +	resp.Header().Set(HEADER_ContentType, contentType)
    +	resp.WriteHeader(status)
    +	return json.NewEncoder(resp).Encode(v)
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/filter.go b/src/prometheus/vendor/github.com/emicklei/go-restful/filter.go
    new file mode 100644
    index 0000000..4b86656
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/filter.go
    @@ -0,0 +1,26 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
    +type FilterChain struct {
    +	Filters []FilterFunction // ordered list of FilterFunction
    +	Index   int              // index into filters that is currently in progress
    +	Target  RouteFunction    // function to call after passing all filters
    +}
    +
    +// ProcessFilter passes the request,response pair through the next of Filters.
    +// Each filter can decide to proceed to the next Filter or handle the Response itself.
    +func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
    +	if f.Index < len(f.Filters) {
    +		f.Index++
    +		f.Filters[f.Index-1](request, response, f)
    +	} else {
    +		f.Target(request, response)
    +	}
    +}
    +
    +// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
    +type FilterFunction func(*Request, *Response, *FilterChain)
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/jsr311.go b/src/prometheus/vendor/github.com/emicklei/go-restful/jsr311.go
    new file mode 100644
    index 0000000..511444a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/jsr311.go
    @@ -0,0 +1,248 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"errors"
    +	"fmt"
    +	"net/http"
    +	"sort"
    +)
    +
    +// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
    +// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
    +// RouterJSR311 implements the Router interface.
    +// Concept of locators is not implemented.
    +type RouterJSR311 struct{}
    +
    +// SelectRoute is part of the Router interface and returns the best match
    +// for the WebService and its Route for the given Request.
    +func (r RouterJSR311) SelectRoute(
    +	webServices []*WebService,
    +	httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
    +
    +	// Identify the root resource class (WebService)
    +	dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
    +	if err != nil {
    +		return nil, nil, NewError(http.StatusNotFound, "")
    +	}
    +	// Obtain the set of candidate methods (Routes)
    +	routes := r.selectRoutes(dispatcher, finalMatch)
    +	if len(routes) == 0 {
    +		return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
    +	}
    +
    +	// Identify the method (Route) that will handle the request
    +	route, ok := r.detectRoute(routes, httpRequest)
    +	return dispatcher, route, ok
    +}
    +
    +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
    +func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
    +	// http method
    +	methodOk := []Route{}
    +	for _, each := range routes {
    +		if httpRequest.Method == each.Method {
    +			methodOk = append(methodOk, each)
    +		}
    +	}
    +	if len(methodOk) == 0 {
    +		if trace {
    +			traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
    +		}
    +		return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
    +	}
    +	inputMediaOk := methodOk
    +
    +	// content-type
    +	contentType := httpRequest.Header.Get(HEADER_ContentType)
    +	inputMediaOk = []Route{}
    +	for _, each := range methodOk {
    +		if each.matchesContentType(contentType) {
    +			inputMediaOk = append(inputMediaOk, each)
    +		}
    +	}
    +	if len(inputMediaOk) == 0 {
    +		if trace {
    +			traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
    +		}
    +		return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
    +	}
    +
    +	// accept
    +	outputMediaOk := []Route{}
    +	accept := httpRequest.Header.Get(HEADER_Accept)
    +	if len(accept) == 0 {
    +		accept = "*/*"
    +	}
    +	for _, each := range inputMediaOk {
    +		if each.matchesAccept(accept) {
    +			outputMediaOk = append(outputMediaOk, each)
    +		}
    +	}
    +	if len(outputMediaOk) == 0 {
    +		if trace {
    +			traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
    +		}
    +		return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
    +	}
    +	// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
    +	return &outputMediaOk[0], nil
    +}
    +
    +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
    +// n/m > n/* > */*
    +func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
    +	// TODO
    +	return &routes[0]
    +}
    +
    +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2  (step 2)
    +func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
    +	filtered := &sortableRouteCandidates{}
    +	for _, each := range dispatcher.Routes() {
    +		pathExpr := each.pathExpr
    +		matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
    +		if matches != nil {
    +			lastMatch := matches[len(matches)-1]
    +			if len(lastMatch) == 0 || lastMatch == "/" { // include only when the remaining match is empty or "/"
    +				filtered.candidates = append(filtered.candidates,
    +					routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
    +			}
    +		}
    +	}
    +	if len(filtered.candidates) == 0 {
    +		if trace {
    +			traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
    +		}
    +		return []Route{}
    +	}
    +	sort.Sort(sort.Reverse(filtered))
    +
    +	// select other routes from candidates whose expression matches rmatch
    +	matchingRoutes := []Route{filtered.candidates[0].route}
    +	for c := 1; c < len(filtered.candidates); c++ {
    +		each := filtered.candidates[c]
    +		if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
    +			matchingRoutes = append(matchingRoutes, each.route)
    +		}
    +	}
    +	return matchingRoutes
    +}
    +
    +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
    +func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
    +	filtered := &sortableDispatcherCandidates{}
    +	for _, each := range dispatchers {
    +		matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
    +		if matches != nil {
    +			filtered.candidates = append(filtered.candidates,
    +				dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
    +		}
    +	}
    +	if len(filtered.candidates) == 0 {
    +		if trace {
    +			traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
    +		}
    +		return nil, "", errors.New("not found")
    +	}
    +	sort.Sort(sort.Reverse(filtered))
    +	return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
    +}
    +
    +// Types and functions to support the sorting of Routes
    +
    +type routeCandidate struct {
    +	route           Route
    +	matchesCount    int // the number of capturing groups
    +	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
    +	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^  /]+?)’)
    +}
    +
    +func (r routeCandidate) expressionToMatch() string {
    +	return r.route.pathExpr.Source
    +}
    +
    +func (r routeCandidate) String() string {
    +	return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
    +}
    +
    +type sortableRouteCandidates struct {
    +	candidates []routeCandidate
    +}
    +
    +func (rcs *sortableRouteCandidates) Len() int {
    +	return len(rcs.candidates)
    +}
    +func (rcs *sortableRouteCandidates) Swap(i, j int) {
    +	rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
    +}
    +func (rcs *sortableRouteCandidates) Less(i, j int) bool {
    +	ci := rcs.candidates[i]
    +	cj := rcs.candidates[j]
    +	// primary key
    +	if ci.literalCount < cj.literalCount {
    +		return true
    +	}
    +	if ci.literalCount > cj.literalCount {
    +		return false
    +	}
    +	// secondary key
    +	if ci.matchesCount < cj.matchesCount {
    +		return true
    +	}
    +	if ci.matchesCount > cj.matchesCount {
    +		return false
    +	}
    +	// tertiary key
    +	if ci.nonDefaultCount < cj.nonDefaultCount {
    +		return true
    +	}
    +	if ci.nonDefaultCount > cj.nonDefaultCount {
    +		return false
    +	}
    +	// quaternary key ("source" is interpreted as Path)
    +	return ci.route.Path < cj.route.Path
    +}
    +
    +// Types and functions to support the sorting of Dispatchers
    +
    +type dispatcherCandidate struct {
    +	dispatcher      *WebService
    +	finalMatch      string
    +	matchesCount    int // the number of capturing groups
    +	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
    +	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^  /]+?)’)
    +}
    +type sortableDispatcherCandidates struct {
    +	candidates []dispatcherCandidate
    +}
    +
    +func (dc *sortableDispatcherCandidates) Len() int {
    +	return len(dc.candidates)
    +}
    +func (dc *sortableDispatcherCandidates) Swap(i, j int) {
    +	dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
    +}
    +func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
    +	ci := dc.candidates[i]
    +	cj := dc.candidates[j]
    +	// primary key
    +	if ci.matchesCount < cj.matchesCount {
    +		return true
    +	}
    +	if ci.matchesCount > cj.matchesCount {
    +		return false
    +	}
    +	// secondary key
    +	if ci.literalCount < cj.literalCount {
    +		return true
    +	}
    +	if ci.literalCount > cj.literalCount {
    +		return false
    +	}
    +	// tertiary key
    +	return ci.nonDefaultCount < cj.nonDefaultCount
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/log/log.go b/src/prometheus/vendor/github.com/emicklei/go-restful/log/log.go
    new file mode 100644
    index 0000000..f70d895
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/log/log.go
    @@ -0,0 +1,31 @@
    +package log
    +
    +import (
    +	stdlog "log"
    +	"os"
    +)
    +
    +// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
    +type StdLogger interface {
    +	Print(v ...interface{})
    +	Printf(format string, v ...interface{})
    +}
    +
    +var Logger StdLogger
    +
    +func init() {
    +	// default Logger
    +	SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
    +}
    +
    +func SetLogger(customLogger StdLogger) {
    +	Logger = customLogger
    +}
    +
    +func Print(v ...interface{}) {
    +	Logger.Print(v...)
    +}
    +
    +func Printf(format string, v ...interface{}) {
    +	Logger.Printf(format, v...)
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/logger.go b/src/prometheus/vendor/github.com/emicklei/go-restful/logger.go
    new file mode 100644
    index 0000000..3f1c4db
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/logger.go
    @@ -0,0 +1,32 @@
    +package restful
    +
    +// Copyright 2014 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +import (
    +	"github.com/emicklei/go-restful/log"
    +)
    +
    +var trace bool = false
    +var traceLogger log.StdLogger
    +
    +func init() {
    +	traceLogger = log.Logger // use the package logger by default
    +}
    +
    +// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set.
    +// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
    +func TraceLogger(logger log.StdLogger) {
    +	traceLogger = logger
    +	EnableTracing(logger != nil)
    +}
    +
    +// expose the setter for the global logger on the top-level package
    +func SetLogger(customLogger log.StdLogger) {
    +	log.SetLogger(customLogger)
    +}
    +
    +// EnableTracing can be used to Trace logging on and off.
    +func EnableTracing(enabled bool) {
    +	trace = enabled
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/mime.go b/src/prometheus/vendor/github.com/emicklei/go-restful/mime.go
    new file mode 100644
    index 0000000..d7ea2b6
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/mime.go
    @@ -0,0 +1,45 @@
    +package restful
    +
    +import (
    +	"strconv"
    +	"strings"
    +)
    +
    +type mime struct {
    +	media   string
    +	quality float64
    +}
    +
    +// insertMime adds a mime to a list and keeps it sorted by quality.
    +func insertMime(l []mime, e mime) []mime {
    +	for i, each := range l {
    +		// if current mime has lower quality then insert before
    +		if e.quality > each.quality {
    +			left := append([]mime{}, l[0:i]...)
    +			return append(append(left, e), l[i:]...)
    +		}
    +	}
    +	return append(l, e)
    +}
    +
    +// sortedMimes returns a list of mime sorted (desc) by its specified quality.
    +func sortedMimes(accept string) (sorted []mime) {
    +	for _, each := range strings.Split(accept, ",") {
    +		typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
    +		if len(typeAndQuality) == 1 {
    +			sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
    +		} else {
    +			// take factor
    +			parts := strings.Split(typeAndQuality[1], "=")
    +			if len(parts) == 2 {
    +				f, err := strconv.ParseFloat(parts[1], 64)
    +				if err != nil {
    +					traceLogger.Printf("unable to parse quality in %s, %v", each, err)
    +				} else {
    +					sorted = insertMime(sorted, mime{typeAndQuality[0], f})
    +				}
    +			}
    +		}
    +	}
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/options_filter.go b/src/prometheus/vendor/github.com/emicklei/go-restful/options_filter.go
    new file mode 100644
    index 0000000..4514ead
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/options_filter.go
    @@ -0,0 +1,26 @@
    +package restful
    +
    +import "strings"
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
    +// and provides the response with a set of allowed methods for the request URL Path.
    +// As for any filter, you can also install it for a particular WebService within a Container.
    +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
    +func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
    +	if "OPTIONS" != req.Request.Method {
    +		chain.ProcessFilter(req, resp)
    +		return
    +	}
    +	resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
    +}
    +
    +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
    +// and provides the response with a set of allowed methods for the request URL Path.
    +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
    +func OPTIONSFilter() FilterFunction {
    +	return DefaultContainer.OPTIONSFilter
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/parameter.go b/src/prometheus/vendor/github.com/emicklei/go-restful/parameter.go
    new file mode 100644
    index 0000000..e11c816
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/parameter.go
    @@ -0,0 +1,114 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +const (
    +	// PathParameterKind = indicator of Request parameter type "path"
    +	PathParameterKind = iota
    +
    +	// QueryParameterKind = indicator of Request parameter type "query"
    +	QueryParameterKind
    +
    +	// BodyParameterKind = indicator of Request parameter type "body"
    +	BodyParameterKind
    +
    +	// HeaderParameterKind = indicator of Request parameter type "header"
    +	HeaderParameterKind
    +
    +	// FormParameterKind = indicator of Request parameter type "form"
    +	FormParameterKind
    +)
    +
    +// Parameter is for documenting the parameter used in a Http Request
    +// ParameterData kinds are Path,Query and Body
    +type Parameter struct {
    +	data *ParameterData
    +}
    +
    +// ParameterData represents the state of a Parameter.
    +// It is made public to make it accessible to e.g. the Swagger package.
    +type ParameterData struct {
    +	Name, Description, DataType, DataFormat string
    +	Kind                                    int
    +	Required                                bool
    +	AllowableValues                         map[string]string
    +	AllowMultiple                           bool
    +	DefaultValue                            string
    +}
    +
    +// Data returns the state of the Parameter
    +func (p *Parameter) Data() ParameterData {
    +	return *p.data
    +}
    +
    +// Kind returns the parameter type indicator (see const for valid values)
    +func (p *Parameter) Kind() int {
    +	return p.data.Kind
    +}
    +
    +func (p *Parameter) bePath() *Parameter {
    +	p.data.Kind = PathParameterKind
    +	return p
    +}
    +func (p *Parameter) beQuery() *Parameter {
    +	p.data.Kind = QueryParameterKind
    +	return p
    +}
    +func (p *Parameter) beBody() *Parameter {
    +	p.data.Kind = BodyParameterKind
    +	return p
    +}
    +
    +func (p *Parameter) beHeader() *Parameter {
    +	p.data.Kind = HeaderParameterKind
    +	return p
    +}
    +
    +func (p *Parameter) beForm() *Parameter {
    +	p.data.Kind = FormParameterKind
    +	return p
    +}
    +
    +// Required sets the required field and returns the receiver
    +func (p *Parameter) Required(required bool) *Parameter {
    +	p.data.Required = required
    +	return p
    +}
    +
    +// AllowMultiple sets the allowMultiple field and returns the receiver
    +func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
    +	p.data.AllowMultiple = multiple
    +	return p
    +}
    +
    +// AllowableValues sets the allowableValues field and returns the receiver
    +func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
    +	p.data.AllowableValues = values
    +	return p
    +}
    +
    +// DataType sets the dataType field and returns the receiver
    +func (p *Parameter) DataType(typeName string) *Parameter {
    +	p.data.DataType = typeName
    +	return p
    +}
    +
    +// DataFormat sets the dataFormat field for Swagger UI
    +func (p *Parameter) DataFormat(formatName string) *Parameter {
    +	p.data.DataFormat = formatName
    +	return p
    +}
    +
    +// DefaultValue sets the default value field and returns the receiver
    +func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
    +	p.data.DefaultValue = stringRepresentation
    +	return p
    +}
    +
    +// Description sets the description value field and returns the receiver
    +func (p *Parameter) Description(doc string) *Parameter {
    +	p.data.Description = doc
    +	return p
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/path_expression.go b/src/prometheus/vendor/github.com/emicklei/go-restful/path_expression.go
    new file mode 100644
    index 0000000..a921e6f
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/path_expression.go
    @@ -0,0 +1,69 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"regexp"
    +	"strings"
    +)
    +
    +// PathExpression holds a compiled path expression (RegExp) needed to match against
    +// Http request paths and to extract path parameter values.
    +type pathExpression struct {
    +	LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
    +	VarCount     int // the number of named parameters (enclosed by {}) in the path
    +	Matcher      *regexp.Regexp
    +	Source       string // Path as defined by the RouteBuilder
    +	tokens       []string
    +}
    +
    +// NewPathExpression creates a PathExpression from the input URL path.
    +// Returns an error if the path is invalid.
    +func newPathExpression(path string) (*pathExpression, error) {
    +	expression, literalCount, varCount, tokens := templateToRegularExpression(path)
    +	compiled, err := regexp.Compile(expression)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
    +}
    +
    +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
    +func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
    +	var buffer bytes.Buffer
    +	buffer.WriteString("^")
    +	//tokens = strings.Split(template, "/")
    +	tokens = tokenizePath(template)
    +	for _, each := range tokens {
    +		if each == "" {
    +			continue
    +		}
    +		buffer.WriteString("/")
    +		if strings.HasPrefix(each, "{") {
    +			// check for regular expression in variable
    +			colon := strings.Index(each, ":")
    +			if colon != -1 {
    +				// extract expression
    +				paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
    +				if paramExpr == "*" { // special case
    +					buffer.WriteString("(.*)")
    +				} else {
    +					buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
    +				}
    +			} else {
    +				// plain var
    +				buffer.WriteString("([^/]+?)")
    +			}
    +			varCount += 1
    +		} else {
    +			literalCount += len(each)
    +			encoded := each // TODO URI encode
    +			buffer.WriteString(regexp.QuoteMeta(encoded))
    +		}
    +	}
    +	return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/request.go b/src/prometheus/vendor/github.com/emicklei/go-restful/request.go
    new file mode 100644
    index 0000000..3e42346
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/request.go
    @@ -0,0 +1,131 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"compress/zlib"
    +	"io/ioutil"
    +	"net/http"
    +)
    +
    +var defaultRequestContentType string
    +
    +var doCacheReadEntityBytes = true
    +
    +// Request is a wrapper for a http Request that provides convenience methods
    +type Request struct {
    +	Request           *http.Request
    +	bodyContent       *[]byte // to cache the request body for multiple reads of ReadEntity
    +	pathParameters    map[string]string
    +	attributes        map[string]interface{} // for storing request-scoped values
    +	selectedRoutePath string                 // root path + route path that matched the request, e.g. /meetings/{id}/attendees
    +}
    +
    +func NewRequest(httpRequest *http.Request) *Request {
    +	return &Request{
    +		Request:        httpRequest,
    +		pathParameters: map[string]string{},
    +		attributes:     map[string]interface{}{},
    +	} // empty parameters, attributes
    +}
    +
    +// If ContentType is missing or */* is given then fall back to this type, otherwise
    +// an "Unable to unmarshal content of type:" response is returned.
    +// Valid values are restful.MIME_JSON and restful.MIME_XML
    +// Example:
    +// 	restful.DefaultRequestContentType(restful.MIME_JSON)
    +func DefaultRequestContentType(mime string) {
    +	defaultRequestContentType = mime
    +}
    +
    +// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
    +// Default is true (due to backward compatibility). For better performance, you should set it to false if you don't need it.
    +func SetCacheReadEntity(doCache bool) {
    +	doCacheReadEntityBytes = doCache
    +}
    +
    +// PathParameter accesses the Path parameter value by its name
    +func (r *Request) PathParameter(name string) string {
    +	return r.pathParameters[name]
    +}
    +
    +// PathParameters accesses the Path parameter values
    +func (r *Request) PathParameters() map[string]string {
    +	return r.pathParameters
    +}
    +
    +// QueryParameter returns the (first) Query parameter value by its name
    +func (r *Request) QueryParameter(name string) string {
    +	return r.Request.FormValue(name)
    +}
    +
    +// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error.
    +func (r *Request) BodyParameter(name string) (string, error) {
    +	err := r.Request.ParseForm()
    +	if err != nil {
    +		return "", err
    +	}
    +	return r.Request.PostFormValue(name), nil
    +}
    +
    +// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
    +func (r *Request) HeaderParameter(name string) string {
    +	return r.Request.Header.Get(name)
    +}
    +
    +// ReadEntity checks the Accept header and reads the content into the entityPointer.
    +func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
    +	contentType := r.Request.Header.Get(HEADER_ContentType)
    +	contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
    +
    +	// OLD feature, cache the body for reads
    +	if doCacheReadEntityBytes {
    +		if r.bodyContent == nil {
    +			data, err := ioutil.ReadAll(r.Request.Body)
    +			if err != nil {
    +				return err
    +			}
    +			r.bodyContent = &data
    +		}
    +		r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
    +	}
    +
    +	// check if the request body needs decompression
    +	if ENCODING_GZIP == contentEncoding {
    +		gzipReader := currentCompressorProvider.AcquireGzipReader()
    +		defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
    +		gzipReader.Reset(r.Request.Body)
    +		r.Request.Body = gzipReader
    +	} else if ENCODING_DEFLATE == contentEncoding {
    +		zlibReader, err := zlib.NewReader(r.Request.Body)
    +		if err != nil {
    +			return err
    +		}
    +		r.Request.Body = zlibReader
    +	}
    +
    +	// lookup the EntityReader
    +	entityReader, ok := entityAccessRegistry.accessorAt(contentType)
    +	if !ok {
    +		return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
    +	}
    +	return entityReader.Read(r, entityPointer)
    +}
    +
    +// SetAttribute adds or replaces the attribute with the given value.
    +func (r *Request) SetAttribute(name string, value interface{}) {
    +	r.attributes[name] = value
    +}
    +
    +// Attribute returns the value associated to the given name. Returns nil if absent.
    +func (r Request) Attribute(name string) interface{} {
    +	return r.attributes[name]
    +}
    +
    +// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
    +func (r Request) SelectedRoutePath() string {
    +	return r.selectedRoutePath
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/response.go b/src/prometheus/vendor/github.com/emicklei/go-restful/response.go
    new file mode 100644
    index 0000000..971cd0b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/response.go
    @@ -0,0 +1,235 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"errors"
    +	"net/http"
    +)
    +
    +// DEPRECATED, use DefaultResponseContentType(mime)
    +var DefaultResponseMimeType string
    +
    +//PrettyPrintResponses controls the indentation feature of XML and JSON serialization
    +var PrettyPrintResponses = true
    +
    +// Response is a wrapper on the actual http ResponseWriter
    +// It provides several convenience methods to prepare and write response content.
    +type Response struct {
    +	http.ResponseWriter
    +	requestAccept string   // mime-type what the Http Request says it wants to receive
    +	routeProduces []string // mime-types what the Route says it can produce
    +	statusCode    int      // HTTP status code that has been written explicitly (if zero then net/http has written 200)
    +	contentLength int      // number of bytes written for the response body
    +	prettyPrint   bool     // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
    +	err           error    // err property is kept when WriteError is called
    +}
    +
    +// Creates a new response based on a http ResponseWriter.
    +func NewResponse(httpWriter http.ResponseWriter) *Response {
    +	return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
    +}
    +
    +// If Accept header matching fails, fall back to this type.
    +// Valid values are restful.MIME_JSON and restful.MIME_XML
    +// Example:
    +// 	restful.DefaultResponseContentType(restful.MIME_JSON)
    +func DefaultResponseContentType(mime string) {
    +	DefaultResponseMimeType = mime
    +}
    +
    +// InternalServerError writes the StatusInternalServerError header.
    +// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
    +func (r Response) InternalServerError() Response {
    +	r.WriteHeader(http.StatusInternalServerError)
    +	return r
    +}
    +
    +// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
    +func (r *Response) PrettyPrint(bePretty bool) {
    +	r.prettyPrint = bePretty
    +}
    +
    +// AddHeader is a shortcut for .Header().Add(header,value)
    +func (r Response) AddHeader(header string, value string) Response {
    +	r.Header().Add(header, value)
    +	return r
    +}
    +
    +// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
    +func (r *Response) SetRequestAccepts(mime string) {
    +	r.requestAccept = mime
    +}
    +
    +// EntityWriter returns the registered EntityWriter that the entity (requested resource)
    +// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
    +// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
    +func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
    +	sorted := sortedMimes(r.requestAccept)
    +	for _, eachAccept := range sorted {
    +		for _, eachProduce := range r.routeProduces {
    +			if eachProduce == eachAccept.media {
    +				if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
    +					return w, true
    +				}
    +			}
    +		}
    +		if eachAccept.media == "*/*" {
    +			for _, each := range r.routeProduces {
    +				if w, ok := entityAccessRegistry.accessorAt(each); ok {
    +					return w, true
    +				}
    +			}
    +		}
    +	}
    +	// if requestAccept is empty
    +	writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
    +	if !ok {
    +		// if not registered then fallback to the defaults (if set)
    +		if DefaultResponseMimeType == MIME_JSON {
    +			return entityAccessRegistry.accessorAt(MIME_JSON)
    +		}
    +		if DefaultResponseMimeType == MIME_XML {
    +			return entityAccessRegistry.accessorAt(MIME_XML)
    +		}
    +		// Fallback to whatever the route says it can produce.
    +		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
    +		for _, each := range r.routeProduces {
    +			if w, ok := entityAccessRegistry.accessorAt(each); ok {
    +				return w, true
    +			}
    +		}
    +		if trace {
    +			traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
    +		}
    +	}
    +	return writer, ok
    +}
    +
    +// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
    +func (r *Response) WriteEntity(value interface{}) error {
    +	return r.WriteHeaderAndEntity(http.StatusOK, value)
    +}
    +
    +// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
    +// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
    +// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
    +// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
    +// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
    +// Current implementation ignores any q-parameters in the Accept Header.
    +// Returns an error if the value could not be written on the response.
    +func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
    +	writer, ok := r.EntityWriter()
    +	if !ok {
    +		r.WriteHeader(http.StatusNotAcceptable)
    +		return nil
    +	}
    +	return writer.Write(r, status, value)
    +}
    +
    +// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
    +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
    +func (r *Response) WriteAsXml(value interface{}) error {
    +	return writeXML(r, http.StatusOK, MIME_XML, value)
    +}
    +
    +// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
    +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
    +func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
    +	return writeXML(r, status, MIME_XML, value)
    +}
    +
    +// WriteAsJson is a convenience method for writing a value in json.
    +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
    +func (r *Response) WriteAsJson(value interface{}) error {
    +	return writeJSON(r, http.StatusOK, MIME_JSON, value)
    +}
    +
    +// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
    +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
    +func (r *Response) WriteJson(value interface{}, contentType string) error {
    +	return writeJSON(r, http.StatusOK, contentType, value)
    +}
    +
    +// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
    +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
    +func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
    +	return writeJSON(r, status, contentType, value)
    +}
    +
    +// WriteError write the http status and the error string on the response.
    +func (r *Response) WriteError(httpStatus int, err error) error {
    +	r.err = err
    +	return r.WriteErrorString(httpStatus, err.Error())
    +}
    +
    +// WriteServiceError is a convenience method for responding with a status and a ServiceError
    +func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
    +	r.err = err
    +	return r.WriteHeaderAndEntity(httpStatus, err)
    +}
    +
    +// WriteErrorString is a convenience method for an error status with the actual error
    +func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
    +	if r.err == nil {
    +		// if not called from WriteError
    +		r.err = errors.New(errorReason)
    +	}
    +	r.WriteHeader(httpStatus)
    +	if _, err := r.Write([]byte(errorReason)); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +// Flush implements http.Flusher interface, which sends any buffered data to the client.
    +func (r *Response) Flush() {
    +	if f, ok := r.ResponseWriter.(http.Flusher); ok {
    +		f.Flush()
    +	} else if trace {
    +		traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
    +	}
    +}
    +
    +// WriteHeader is overridden to remember the Status Code that has been written.
    +// Changes to the Header of the response have no effect after this.
    +func (r *Response) WriteHeader(httpStatus int) {
    +	r.statusCode = httpStatus
    +	r.ResponseWriter.WriteHeader(httpStatus)
    +}
    +
    +// StatusCode returns the code that has been written using WriteHeader.
    +func (r Response) StatusCode() int {
    +	if 0 == r.statusCode {
    +		// no status code has been written yet; assume OK
    +		return http.StatusOK
    +	}
    +	return r.statusCode
    +}
    +
    +// Write writes the data to the connection as part of an HTTP reply.
    +// Write is part of http.ResponseWriter interface.
    +func (r *Response) Write(bytes []byte) (int, error) {
    +	written, err := r.ResponseWriter.Write(bytes)
    +	r.contentLength += written
    +	return written, err
    +}
    +
    +// ContentLength returns the number of bytes written for the response content.
    +// Note that this value is only correct if all data is written through the Response using its Write* methods.
    +// Data written directly using the underlying http.ResponseWriter is not accounted for.
    +func (r Response) ContentLength() int {
    +	return r.contentLength
    +}
    +
    +// CloseNotify is part of http.CloseNotifier interface
    +func (r Response) CloseNotify() <-chan bool {
    +	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
    +}
    +
    +// Error returns the err created by WriteError
    +func (r Response) Error() error {
    +	return r.err
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/route.go b/src/prometheus/vendor/github.com/emicklei/go-restful/route.go
    new file mode 100644
    index 0000000..f54e862
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/route.go
    @@ -0,0 +1,183 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"net/http"
    +	"strings"
    +)
    +
    +// RouteFunction declares the signature of a function that can be bound to a Route.
    +type RouteFunction func(*Request, *Response)
    +
    +// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
    +type Route struct {
    +	Method   string
    +	Produces []string
    +	Consumes []string
    +	Path     string // webservice root path + described path
    +	Function RouteFunction
    +	Filters  []FilterFunction
    +
    +	// cached values for dispatching
    +	relativePath string
    +	pathParts    []string
    +	pathExpr     *pathExpression // cached compilation of relativePath as RegExp
    +
    +	// documentation
    +	Doc                     string
    +	Notes                   string
    +	Operation               string
    +	ParameterDocs           []*Parameter
    +	ResponseErrors          map[int]ResponseError
    +	ReadSample, WriteSample interface{} // structs that model an example request or response payload
    +}
    +
    +// Initialize for Route
    +func (r *Route) postBuild() {
    +	r.pathParts = tokenizePath(r.Path)
    +}
    +
    +// Create Request and Response from their http versions
    +func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
    +	params := r.extractParameters(httpRequest.URL.Path)
    +	wrappedRequest := NewRequest(httpRequest)
    +	wrappedRequest.pathParameters = params
    +	wrappedRequest.selectedRoutePath = r.Path
    +	wrappedResponse := NewResponse(httpWriter)
    +	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
    +	wrappedResponse.routeProduces = r.Produces
    +	return wrappedRequest, wrappedResponse
    +}
    +
    +// dispatchWithFilters call the function after passing through its own filters
    +func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
    +	if len(r.Filters) > 0 {
    +		chain := FilterChain{Filters: r.Filters, Target: r.Function}
    +		chain.ProcessFilter(wrappedRequest, wrappedResponse)
    +	} else {
    +		// unfiltered
    +		r.Function(wrappedRequest, wrappedResponse)
    +	}
    +}
    +
    +// Return whether the mimeType matches what this Route can produce.
    +func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
    +	parts := strings.Split(mimeTypesWithQuality, ",")
    +	for _, each := range parts {
    +		var withoutQuality string
    +		if strings.Contains(each, ";") {
    +			withoutQuality = strings.Split(each, ";")[0]
    +		} else {
    +			withoutQuality = each
    +		}
    +		// trim before compare
    +		withoutQuality = strings.Trim(withoutQuality, " ")
    +		if withoutQuality == "*/*" {
    +			return true
    +		}
    +		for _, producibleType := range r.Produces {
    +			if producibleType == "*/*" || producibleType == withoutQuality {
    +				return true
    +			}
    +		}
    +	}
    +	return false
    +}
    +
    +// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
    +func (r Route) matchesContentType(mimeTypes string) bool {
    +
    +	if len(r.Consumes) == 0 {
    +		// did not specify what it can consume ;  any media type (“*/*”) is assumed
    +		return true
    +	}
    +
    +	if len(mimeTypes) == 0 {
    +		// idempotent methods with (most likely or guaranteed) empty content match a missing Content-Type
    +		m := r.Method
    +		if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
    +			return true
    +		}
    +		// proceed with default
    +		mimeTypes = MIME_OCTET
    +	}
    +
    +	parts := strings.Split(mimeTypes, ",")
    +	for _, each := range parts {
    +		var contentType string
    +		if strings.Contains(each, ";") {
    +			contentType = strings.Split(each, ";")[0]
    +		} else {
    +			contentType = each
    +		}
    +		// trim before compare
    +		contentType = strings.Trim(contentType, " ")
    +		for _, consumeableType := range r.Consumes {
    +			if consumeableType == "*/*" || consumeableType == contentType {
    +				return true
    +			}
    +		}
    +	}
    +	return false
    +}
    +
    +// Extract the parameters from the request url path
    +func (r Route) extractParameters(urlPath string) map[string]string {
    +	urlParts := tokenizePath(urlPath)
    +	pathParameters := map[string]string{}
    +	for i, key := range r.pathParts {
    +		var value string
    +		if i >= len(urlParts) {
    +			value = ""
    +		} else {
    +			value = urlParts[i]
    +		}
    +		if strings.HasPrefix(key, "{") { // path-parameter
    +			if colon := strings.Index(key, ":"); colon != -1 {
    +				// extract by regex
    +				regPart := key[colon+1 : len(key)-1]
    +				keyPart := key[1:colon]
    +				if regPart == "*" {
    +					pathParameters[keyPart] = untokenizePath(i, urlParts)
    +					break
    +				} else {
    +					pathParameters[keyPart] = value
    +				}
    +			} else {
    +				// without enclosing {}
    +				pathParameters[key[1:len(key)-1]] = value
    +			}
    +		}
    +	}
    +	return pathParameters
    +}
    +
    +// Untokenize back into an URL path using the slash separator
    +func untokenizePath(offset int, parts []string) string {
    +	var buffer bytes.Buffer
    +	for p := offset; p < len(parts); p++ {
    +		buffer.WriteString(parts[p])
    +		// do not end
    +		if p < len(parts)-1 {
    +			buffer.WriteString("/")
    +		}
    +	}
    +	return buffer.String()
    +}
    +
    +// Tokenize an URL path using the slash separator ; the result does not have empty tokens
    +func tokenizePath(path string) []string {
    +	if "/" == path {
    +		return []string{}
    +	}
    +	return strings.Split(strings.Trim(path, "/"), "/")
    +}
    +
    +// for debugging
    +func (r Route) String() string {
    +	return r.Method + " " + r.Path
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/route_builder.go b/src/prometheus/vendor/github.com/emicklei/go-restful/route_builder.go
    new file mode 100644
    index 0000000..8bc1ab6
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/route_builder.go
    @@ -0,0 +1,240 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"os"
    +	"reflect"
    +	"runtime"
    +	"strings"
    +
    +	"github.com/emicklei/go-restful/log"
    +)
    +
    +// RouteBuilder is a helper to construct Routes.
    +type RouteBuilder struct {
    +	rootPath    string
    +	currentPath string
    +	produces    []string
    +	consumes    []string
    +	httpMethod  string        // required
    +	function    RouteFunction // required
    +	filters     []FilterFunction
    +	// documentation
    +	doc                     string
    +	notes                   string
    +	operation               string
    +	readSample, writeSample interface{}
    +	parameters              []*Parameter
    +	errorMap                map[int]ResponseError
    +}
    +
    +// Do evaluates each argument with the RouteBuilder itself.
    +// This allows you to follow DRY principles without breaking the fluent programming style.
    +// Example:
    +// 		ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
    +//
    +//		func Returns500(b *RouteBuilder) {
    +//			b.Returns(500, "Internal Server Error", restful.ServiceError{})
    +//		}
    +func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
    +	for _, each := range oneArgBlocks {
    +		each(b)
    +	}
    +	return b
    +}
    +
    +// To bind the route to a function.
    +// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
    +func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
    +	b.function = function
    +	return b
    +}
    +
    +// Method specifies what HTTP method to match. Required.
    +func (b *RouteBuilder) Method(method string) *RouteBuilder {
    +	b.httpMethod = method
    +	return b
    +}
    +
    +// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
    +func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
    +	b.produces = mimeTypes
    +	return b
    +}
    +
    +// Consumes specifies what MIME types can be consumed ; the request's Content-Type Http header must match one of these
    +func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
    +	b.consumes = mimeTypes
    +	return b
    +}
    +
    +// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
    +func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
    +	b.currentPath = subPath
    +	return b
    +}
    +
    +// Doc tells what this route is all about. Optional.
    +func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
    +	b.doc = documentation
    +	return b
    +}
    +
    +// A verbose explanation of the operation behavior. Optional.
    +func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
    +	b.notes = notes
    +	return b
    +}
    +
    +// Reads tells what resource type will be read from the request payload. Optional.
    +// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
    +func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
    +	b.readSample = sample
    +	typeAsName := reflect.TypeOf(sample).String()
    +	bodyParameter := &Parameter{&ParameterData{Name: "body"}}
    +	bodyParameter.beBody()
    +	bodyParameter.Required(true)
    +	bodyParameter.DataType(typeAsName)
    +	b.Param(bodyParameter)
    +	return b
    +}
    +
    +// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not.
    +// Use this to modify or extend information for the Parameter (through its Data()).
    +func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
    +	for _, each := range b.parameters {
    +		if each.Data().Name == name {
    +			return each
    +		}
    +	}
    +	return p
    +}
    +
    +// Writes tells what resource type will be written as the response payload. Optional.
    +func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
    +	b.writeSample = sample
    +	return b
    +}
    +
    +// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
    +func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
    +	if b.parameters == nil {
    +		b.parameters = []*Parameter{}
    +	}
    +	b.parameters = append(b.parameters, parameter)
    +	return b
    +}
    +
    +// Operation allows you to document what the actual method/function call is of the Route.
    +// Unless called, the operation name is derived from the RouteFunction set using To(..).
    +func (b *RouteBuilder) Operation(name string) *RouteBuilder {
    +	b.operation = name
    +	return b
    +}
    +
    +// ReturnsError is deprecated, use Returns instead.
    +func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
    +	log.Print("ReturnsError is deprecated, use Returns instead.")
    +	return b.Returns(code, message, model)
    +}
    +
    +// Returns allows you to document what responses (errors or regular) can be expected.
    +// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
    +func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
    +	err := ResponseError{
    +		Code:    code,
    +		Message: message,
    +		Model:   model,
    +	}
    +	// lazy init because there is no NewRouteBuilder (yet)
    +	if b.errorMap == nil {
    +		b.errorMap = map[int]ResponseError{}
    +	}
    +	b.errorMap[code] = err
    +	return b
    +}
    +
    +type ResponseError struct {
    +	Code    int
    +	Message string
    +	Model   interface{}
    +}
    +
    +func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
    +	b.rootPath = path
    +	return b
    +}
    +
    +// Filter appends a FilterFunction to the end of filters for this Route to build.
    +func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
    +	b.filters = append(b.filters, filter)
    +	return b
    +}
    +
    +// If no specific Route path then set to rootPath
    +// If no specific Produces then set to rootProduces
    +// If no specific Consumes then set to rootConsumes
    +func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
    +	if len(b.produces) == 0 {
    +		b.produces = rootProduces
    +	}
    +	if len(b.consumes) == 0 {
    +		b.consumes = rootConsumes
    +	}
    +}
    +
    +// Build creates a new Route using the specification details collected by the RouteBuilder
    +func (b *RouteBuilder) Build() Route {
    +	pathExpr, err := newPathExpression(b.currentPath)
    +	if err != nil {
    +		log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
    +		os.Exit(1)
    +	}
    +	if b.function == nil {
    +		log.Printf("[restful] No function specified for route:" + b.currentPath)
    +		os.Exit(1)
    +	}
    +	operationName := b.operation
    +	if len(operationName) == 0 && b.function != nil {
    +		// extract from definition
    +		operationName = nameOfFunction(b.function)
    +	}
    +	route := Route{
    +		Method:         b.httpMethod,
    +		Path:           concatPath(b.rootPath, b.currentPath),
    +		Produces:       b.produces,
    +		Consumes:       b.consumes,
    +		Function:       b.function,
    +		Filters:        b.filters,
    +		relativePath:   b.currentPath,
    +		pathExpr:       pathExpr,
    +		Doc:            b.doc,
    +		Notes:          b.notes,
    +		Operation:      operationName,
    +		ParameterDocs:  b.parameters,
    +		ResponseErrors: b.errorMap,
    +		ReadSample:     b.readSample,
    +		WriteSample:    b.writeSample}
    +	route.postBuild()
    +	return route
    +}
    +
    +func concatPath(path1, path2 string) string {
    +	return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
    +}
    +
    +// nameOfFunction returns the short name of the function f for documentation.
    +// It uses a runtime feature for debugging ; its value may change for later Go versions.
    +func nameOfFunction(f interface{}) string {
    +	fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
    +	tokenized := strings.Split(fun.Name(), ".")
    +	last := tokenized[len(tokenized)-1]
    +	last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
    +	last = strings.TrimSuffix(last, ")-fm") // Go 1.5
    +	last = strings.TrimSuffix(last, "·fm")  // < Go 1.5
    +	last = strings.TrimSuffix(last, "-fm")  // Go 1.5
    +	return last
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/router.go b/src/prometheus/vendor/github.com/emicklei/go-restful/router.go
    new file mode 100644
    index 0000000..9b32fb6
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/router.go
    @@ -0,0 +1,18 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import "net/http"
    +
    +// A RouteSelector finds the best matching Route given the input HTTP Request
    +type RouteSelector interface {
    +
    +	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
    +	// It returns a selected Route and its containing WebService or an error indicating
    +	// a problem.
    +	SelectRoute(
    +		webServices []*WebService,
    +		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/service_error.go b/src/prometheus/vendor/github.com/emicklei/go-restful/service_error.go
    new file mode 100644
    index 0000000..62d1108
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/service_error.go
    @@ -0,0 +1,23 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import "fmt"
    +
    +// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request.
    +type ServiceError struct {
    +	Code    int
    +	Message string
    +}
    +
    +// NewError returns a ServiceError using the code and reason
    +func NewError(code int, message string) ServiceError {
    +	return ServiceError{Code: code, Message: message}
    +}
    +
    +// Error returns a text representation of the service error
    +func (s ServiceError) Error() string {
    +	return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md
    new file mode 100644
    index 0000000..736f6f3
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md
    @@ -0,0 +1,43 @@
    +Change history of swagger
    +=
    +2015-10-16
    +- add type override mechanism for swagger models (MR 254, nathanejohnson)
    +- replace uses of wildcard in generated apidocs (issue 251)
    +
    +2015-05-25
    +- (api break) changed the type of Properties in Model
    +- (api break) changed the type of Models in ApiDeclaration
    +- (api break) changed the parameter type of PostBuildDeclarationMapFunc
    +
    +2015-04-09
    +- add ModelBuildable interface for customization of Model
    +
    +2015-03-17
    +- preserve order of Routes per WebService in Swagger listing
    +- fix use of $ref and type in Swagger models
    +- add api version to listing
    +
    +2014-11-14
    +- operation parameters are now sorted using ordering path,query,form,header,body
    +
    +2014-11-12
    +- respect omitempty tag value for embedded structs
    +- expose ApiVersion of WebService to Swagger ApiDeclaration
    +
    +2014-05-29
    +- (api add) Ability to define custom http.Handler to serve swagger-ui static files
    +
    +2014-05-04
    +- (fix) include model for array element type of response
    +
    +2014-01-03
    +- (fix) do not add primitive type to the Api models
    +
    +2013-11-27
    +- (fix) make Swagger work for WebServices with root ("/" or "") paths
    +
    +2013-10-29
    +- (api add) package variable LogInfo to customize logging function
    +
    +2013-10-15
    +- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition)
    \ No newline at end of file
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go
    new file mode 100644
    index 0000000..9f4c369
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go
    @@ -0,0 +1,64 @@
    +package swagger
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +)
    +
    +// ApiDeclarationList maintains an ordered list of ApiDeclaration.
    +type ApiDeclarationList struct {
    +	List []ApiDeclaration
    +}
    +
    +// At returns the ApiDeclaration by its path unless absent, then ok is false
    +func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
    +	for _, each := range l.List {
    +		if each.ResourcePath == path {
    +			return each, true
    +		}
    +	}
    +	return a, false
    +}
    +
+// Put adds or replaces an ApiDeclaration with this name
    +func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
    +	// maybe replace existing
    +	for i, each := range l.List {
    +		if each.ResourcePath == path {
    +			// replace
    +			l.List[i] = a
    +			return
    +		}
    +	}
    +	// add
    +	l.List = append(l.List, a)
    +}
    +
    +// Do enumerates all the properties, each with its assigned name
    +func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
    +	for _, each := range l.List {
    +		block(each.ResourcePath, each)
    +	}
    +}
    +
    +// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
    +func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
    +	var buf bytes.Buffer
    +	encoder := json.NewEncoder(&buf)
    +	buf.WriteString("{\n")
    +	for i, each := range l.List {
    +		buf.WriteString("\"")
    +		buf.WriteString(each.ResourcePath)
    +		buf.WriteString("\": ")
    +		encoder.Encode(each)
    +		if i < len(l.List)-1 {
    +			buf.WriteString(",\n")
    +		}
    +	}
    +	buf.WriteString("}")
    +	return buf.Bytes(), nil
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/config.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/config.go
    new file mode 100644
    index 0000000..510d6fc
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/config.go
    @@ -0,0 +1,38 @@
    +package swagger
    +
    +import (
    +	"net/http"
    +
    +	"github.com/emicklei/go-restful"
    +)
    +
    +// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
    +type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
    +
    +type MapSchemaFormatFunc func(typeName string) string
    +
    +type Config struct {
    +	// url where the services are available, e.g. http://localhost:8080
    +	// if left empty then the basePath of Swagger is taken from the actual request
    +	WebServicesUrl string
+	// path where the JSON api is available, e.g. /apidocs
    +	ApiPath string
    +	// [optional] path where the swagger UI will be served, e.g. /swagger
    +	SwaggerPath string
    +	// [optional] location of folder containing Swagger HTML5 application index.html
    +	SwaggerFilePath string
    +	// api listing is constructed from this list of restful WebServices.
    +	WebServices []*restful.WebService
    +	// will serve all static content (scripts,pages,images)
    +	StaticHandler http.Handler
    +	// [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled.
    +	DisableCORS bool
    +	// Top-level API version. Is reflected in the resource listing.
    +	ApiVersion string
    +	// If set then call this handler after building the complete ApiDeclaration Map
    +	PostBuildHandler PostBuildDeclarationMapFunc
    +	// Swagger global info struct
    +	Info Info
+	// [optional] If set, model builder should call this handler to get additional typename-to-swagger-format-field conversion.
    +	SchemaFormatHandler MapSchemaFormatFunc
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_builder.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_builder.go
    new file mode 100644
    index 0000000..398e830
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_builder.go
    @@ -0,0 +1,449 @@
    +package swagger
    +
    +import (
    +	"encoding/json"
    +	"reflect"
    +	"strings"
    +)
    +
    +// ModelBuildable is used for extending Structs that need more control over
    +// how the Model appears in the Swagger api declaration.
    +type ModelBuildable interface {
    +	PostBuildModel(m *Model) *Model
    +}
    +
    +type modelBuilder struct {
    +	Models *ModelList
    +	Config *Config
    +}
    +
    +type documentable interface {
    +	SwaggerDoc() map[string]string
    +}
    +
    +// Check if this structure has a method with signature func () SwaggerDoc() map[string]string
+// If it exists, retrieve the documentation and overwrite all struct tag descriptions
    +func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
    +	if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
    +		return docable.SwaggerDoc()
    +	}
    +	return make(map[string]string)
    +}
    +
    +// addModelFrom creates and adds a Model to the builder and detects and calls
    +// the post build hook for customizations
    +func (b modelBuilder) addModelFrom(sample interface{}) {
    +	if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
    +		// allow customizations
    +		if buildable, ok := sample.(ModelBuildable); ok {
    +			modelOrNil = buildable.PostBuildModel(modelOrNil)
    +			b.Models.Put(modelOrNil.Id, *modelOrNil)
    +		}
    +	}
    +}
    +
    +func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
    +	modelName := b.keyFrom(st)
    +	if nameOverride != "" {
    +		modelName = nameOverride
    +	}
    +	// no models needed for primitive types
    +	if b.isPrimitiveType(modelName) {
    +		return nil
    +	}
    +	// golang encoding/json packages says array and slice values encode as
    +	// JSON arrays, except that []byte encodes as a base64-encoded string.
    +	// If we see a []byte here, treat it at as a primitive type (string)
    +	// and deal with it in buildArrayTypeProperty.
    +	if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) &&
    +		st.Elem().Kind() == reflect.Uint8 {
    +		return nil
    +	}
    +	// see if we already have visited this model
    +	if _, ok := b.Models.At(modelName); ok {
    +		return nil
    +	}
    +	sm := Model{
    +		Id:         modelName,
    +		Required:   []string{},
    +		Properties: ModelPropertyList{}}
    +
    +	// reference the model before further initializing (enables recursive structs)
    +	b.Models.Put(modelName, sm)
    +
    +	// check for slice or array
    +	if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
    +		b.addModel(st.Elem(), "")
    +		return &sm
    +	}
    +	// check for structure or primitive type
    +	if st.Kind() != reflect.Struct {
    +		return &sm
    +	}
    +
    +	fullDoc := getDocFromMethodSwaggerDoc2(st)
    +	modelDescriptions := []string{}
    +
    +	for i := 0; i < st.NumField(); i++ {
    +		field := st.Field(i)
    +		jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
    +		if len(modelDescription) > 0 {
    +			modelDescriptions = append(modelDescriptions, modelDescription)
    +		}
    +
    +		// add if not omitted
    +		if len(jsonName) != 0 {
    +			// update description
    +			if fieldDoc, ok := fullDoc[jsonName]; ok {
    +				prop.Description = fieldDoc
    +			}
    +			// update Required
    +			if b.isPropertyRequired(field) {
    +				sm.Required = append(sm.Required, jsonName)
    +			}
    +			sm.Properties.Put(jsonName, prop)
    +		}
    +	}
    +
    +	// We always overwrite documentation if SwaggerDoc method exists
    +	// "" is special for documenting the struct itself
    +	if modelDoc, ok := fullDoc[""]; ok {
    +		sm.Description = modelDoc
    +	} else if len(modelDescriptions) != 0 {
    +		sm.Description = strings.Join(modelDescriptions, "\n")
    +	}
    +
    +	// update model builder with completed model
    +	b.Models.Put(modelName, sm)
    +
    +	return &sm
    +}
    +
    +func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
    +	required := true
    +	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
    +		s := strings.Split(jsonTag, ",")
    +		if len(s) > 1 && s[1] == "omitempty" {
    +			return false
    +		}
    +	}
    +	return required
    +}
    +
    +func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
    +	jsonName = b.jsonNameOfField(field)
    +	if len(jsonName) == 0 {
    +		// empty name signals skip property
    +		return "", "", prop
    +	}
    +
    +	if tag := field.Tag.Get("modelDescription"); tag != "" {
    +		modelDescription = tag
    +	}
    +
    +	prop.setPropertyMetadata(field)
    +	if prop.Type != nil {
    +		return jsonName, modelDescription, prop
    +	}
    +	fieldType := field.Type
    +
    +	// check if type is doing its own marshalling
    +	marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
    +	if fieldType.Implements(marshalerType) {
    +		var pType = "string"
    +		if prop.Type == nil {
    +			prop.Type = &pType
    +		}
    +		if prop.Format == "" {
    +			prop.Format = b.jsonSchemaFormat(fieldType.String())
    +		}
    +		return jsonName, modelDescription, prop
    +	}
    +
    +	// check if annotation says it is a string
    +	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
    +		s := strings.Split(jsonTag, ",")
    +		if len(s) > 1 && s[1] == "string" {
    +			stringt := "string"
    +			prop.Type = &stringt
    +			return jsonName, modelDescription, prop
    +		}
    +	}
    +
    +	fieldKind := fieldType.Kind()
    +	switch {
    +	case fieldKind == reflect.Struct:
    +		jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
    +		return jsonName, modelDescription, prop
    +	case fieldKind == reflect.Slice || fieldKind == reflect.Array:
    +		jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
    +		return jsonName, modelDescription, prop
    +	case fieldKind == reflect.Ptr:
    +		jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
    +		return jsonName, modelDescription, prop
    +	case fieldKind == reflect.String:
    +		stringt := "string"
    +		prop.Type = &stringt
    +		return jsonName, modelDescription, prop
    +	case fieldKind == reflect.Map:
    +		// if it's a map, it's unstructured, and swagger 1.2 can't handle it
    +		objectType := "object"
    +		prop.Type = &objectType
    +		return jsonName, modelDescription, prop
    +	}
    +
    +	if b.isPrimitiveType(fieldType.String()) {
    +		mapped := b.jsonSchemaType(fieldType.String())
    +		prop.Type = &mapped
    +		prop.Format = b.jsonSchemaFormat(fieldType.String())
    +		return jsonName, modelDescription, prop
    +	}
    +	modelType := fieldType.String()
    +	prop.Ref = &modelType
    +
    +	if fieldType.Name() == "" { // override type of anonymous structs
    +		nestedTypeName := modelName + "." + jsonName
    +		prop.Ref = &nestedTypeName
    +		b.addModel(fieldType, nestedTypeName)
    +	}
    +	return jsonName, modelDescription, prop
    +}
    +
    +func hasNamedJSONTag(field reflect.StructField) bool {
    +	parts := strings.Split(field.Tag.Get("json"), ",")
    +	if len(parts) == 0 {
    +		return false
    +	}
    +	for _, s := range parts[1:] {
    +		if s == "inline" {
    +			return false
    +		}
    +	}
    +	return len(parts[0]) > 0
    +}
    +
    +func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
    +	prop.setPropertyMetadata(field)
    +	// Check for type override in tag
    +	if prop.Type != nil {
    +		return jsonName, prop
    +	}
    +	fieldType := field.Type
    +	// check for anonymous
    +	if len(fieldType.Name()) == 0 {
    +		// anonymous
    +		anonType := model.Id + "." + jsonName
    +		b.addModel(fieldType, anonType)
    +		prop.Ref = &anonType
    +		return jsonName, prop
    +	}
    +
    +	if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
    +		// embedded struct
    +		sub := modelBuilder{new(ModelList), b.Config}
    +		sub.addModel(fieldType, "")
    +		subKey := sub.keyFrom(fieldType)
    +		// merge properties from sub
    +		subModel, _ := sub.Models.At(subKey)
    +		subModel.Properties.Do(func(k string, v ModelProperty) {
    +			model.Properties.Put(k, v)
    +			// if subModel says this property is required then include it
    +			required := false
    +			for _, each := range subModel.Required {
    +				if k == each {
    +					required = true
    +					break
    +				}
    +			}
    +			if required {
    +				model.Required = append(model.Required, k)
    +			}
    +		})
    +		// add all new referenced models
    +		sub.Models.Do(func(key string, sub Model) {
    +			if key != subKey {
    +				if _, ok := b.Models.At(key); !ok {
    +					b.Models.Put(key, sub)
    +				}
    +			}
    +		})
    +		// empty name signals skip property
    +		return "", prop
    +	}
    +	// simple struct
    +	b.addModel(fieldType, "")
    +	var pType = fieldType.String()
    +	prop.Ref = &pType
    +	return jsonName, prop
    +}
    +
    +func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
    +	// check for type override in tags
    +	prop.setPropertyMetadata(field)
    +	if prop.Type != nil {
    +		return jsonName, prop
    +	}
    +	fieldType := field.Type
    +	if fieldType.Elem().Kind() == reflect.Uint8 {
    +		stringt := "string"
    +		prop.Type = &stringt
    +		return jsonName, prop
    +	}
    +	var pType = "array"
    +	prop.Type = &pType
    +	isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
    +	elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
    +	prop.Items = new(Item)
    +	if isPrimitive {
    +		mapped := b.jsonSchemaType(elemTypeName)
    +		prop.Items.Type = &mapped
    +	} else {
    +		prop.Items.Ref = &elemTypeName
    +	}
    +	// add|overwrite model for element type
    +	if fieldType.Elem().Kind() == reflect.Ptr {
    +		fieldType = fieldType.Elem()
    +	}
    +	if !isPrimitive {
    +		b.addModel(fieldType.Elem(), elemTypeName)
    +	}
    +	return jsonName, prop
    +}
    +
    +func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
    +	prop.setPropertyMetadata(field)
    +	// Check for type override in tags
    +	if prop.Type != nil {
    +		return jsonName, prop
    +	}
    +	fieldType := field.Type
    +
    +	// override type of pointer to list-likes
    +	if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
    +		var pType = "array"
    +		prop.Type = &pType
    +		isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
    +		elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
    +		if isPrimitive {
    +			primName := b.jsonSchemaType(elemName)
    +			prop.Items = &Item{Ref: &primName}
    +		} else {
    +			prop.Items = &Item{Ref: &elemName}
    +		}
    +		if !isPrimitive {
    +			// add|overwrite model for element type
    +			b.addModel(fieldType.Elem().Elem(), elemName)
    +		}
    +	} else {
    +		// non-array, pointer type
    +		var pType = b.jsonSchemaType(fieldType.String()[1:]) // no star, include pkg path
    +		if b.isPrimitiveType(fieldType.String()[1:]) {
    +			prop.Type = &pType
    +			prop.Format = b.jsonSchemaFormat(fieldType.String()[1:])
    +			return jsonName, prop
    +		}
    +		prop.Ref = &pType
    +		elemName := ""
    +		if fieldType.Elem().Name() == "" {
    +			elemName = modelName + "." + jsonName
    +			prop.Ref = &elemName
    +		}
    +		b.addModel(fieldType.Elem(), elemName)
    +	}
    +	return jsonName, prop
    +}
    +
    +func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
    +	if t.Kind() == reflect.Ptr {
    +		return t.String()[1:]
    +	}
    +	if t.Name() == "" {
    +		return modelName + "." + jsonName
    +	}
    +	return b.keyFrom(t)
    +}
    +
    +func (b modelBuilder) keyFrom(st reflect.Type) string {
    +	key := st.String()
    +	if len(st.Name()) == 0 { // unnamed type
    +		// Swagger UI has special meaning for [
    +		key = strings.Replace(key, "[]", "||", -1)
    +	}
    +	return key
    +}
    +
    +// see also https://golang.org/ref/spec#Numeric_types
    +func (b modelBuilder) isPrimitiveType(modelName string) bool {
    +	if len(modelName) == 0 {
    +		return false
    +	}
    +	return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
    +}
    +
    +// jsonNameOfField returns the name of the field as it should appear in JSON format
    +// An empty string indicates that this field is not part of the JSON representation
    +func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
    +	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
    +		s := strings.Split(jsonTag, ",")
    +		if s[0] == "-" {
    +			// empty name signals skip property
    +			return ""
    +		} else if s[0] != "" {
    +			return s[0]
    +		}
    +	}
    +	return field.Name
    +}
    +
    +// see also http://json-schema.org/latest/json-schema-core.html#anchor8
    +func (b modelBuilder) jsonSchemaType(modelName string) string {
    +	schemaMap := map[string]string{
    +		"uint":   "integer",
    +		"uint8":  "integer",
    +		"uint16": "integer",
    +		"uint32": "integer",
    +		"uint64": "integer",
    +
    +		"int":   "integer",
    +		"int8":  "integer",
    +		"int16": "integer",
    +		"int32": "integer",
    +		"int64": "integer",
    +
    +		"byte":      "integer",
    +		"float64":   "number",
    +		"float32":   "number",
    +		"bool":      "boolean",
    +		"time.Time": "string",
    +	}
    +	mapped, ok := schemaMap[modelName]
    +	if !ok {
    +		return modelName // use as is (custom or struct)
    +	}
    +	return mapped
    +}
    +
    +func (b modelBuilder) jsonSchemaFormat(modelName string) string {
    +	if b.Config != nil && b.Config.SchemaFormatHandler != nil {
    +		if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
    +			return mapped
    +		}
    +	}
    +	schemaMap := map[string]string{
    +		"int":        "int32",
    +		"int32":      "int32",
    +		"int64":      "int64",
    +		"byte":       "byte",
    +		"uint":       "integer",
    +		"uint8":      "byte",
    +		"float64":    "double",
    +		"float32":    "float",
    +		"time.Time":  "date-time",
    +		"*time.Time": "date-time",
    +	}
    +	mapped, ok := schemaMap[modelName]
    +	if !ok {
    +		return "" // no format
    +	}
    +	return mapped
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_list.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_list.go
    new file mode 100644
    index 0000000..9bb6cb6
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_list.go
    @@ -0,0 +1,86 @@
    +package swagger
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +)
    +
    +// NamedModel associates a name with a Model (not using its Id)
    +type NamedModel struct {
    +	Name  string
    +	Model Model
    +}
    +
    +// ModelList encapsulates a list of NamedModel (association)
    +type ModelList struct {
    +	List []NamedModel
    +}
    +
    +// Put adds or replaces a Model by its name
    +func (l *ModelList) Put(name string, model Model) {
    +	for i, each := range l.List {
    +		if each.Name == name {
    +			// replace
    +			l.List[i] = NamedModel{name, model}
    +			return
    +		}
    +	}
    +	// add
    +	l.List = append(l.List, NamedModel{name, model})
    +}
    +
    +// At returns a Model by its name, ok is false if absent
    +func (l *ModelList) At(name string) (m Model, ok bool) {
    +	for _, each := range l.List {
    +		if each.Name == name {
    +			return each.Model, true
    +		}
    +	}
    +	return m, false
    +}
    +
    +// Do enumerates all the models, each with its assigned name
    +func (l *ModelList) Do(block func(name string, value Model)) {
    +	for _, each := range l.List {
    +		block(each.Name, each.Model)
    +	}
    +}
    +
    +// MarshalJSON writes the ModelList as if it was a map[string]Model
    +func (l ModelList) MarshalJSON() ([]byte, error) {
    +	var buf bytes.Buffer
    +	encoder := json.NewEncoder(&buf)
    +	buf.WriteString("{\n")
    +	for i, each := range l.List {
    +		buf.WriteString("\"")
    +		buf.WriteString(each.Name)
    +		buf.WriteString("\": ")
    +		encoder.Encode(each.Model)
    +		if i < len(l.List)-1 {
    +			buf.WriteString(",\n")
    +		}
    +	}
    +	buf.WriteString("}")
    +	return buf.Bytes(), nil
    +}
    +
    +// UnmarshalJSON reads back a ModelList. This is an expensive operation.
    +func (l *ModelList) UnmarshalJSON(data []byte) error {
    +	raw := map[string]interface{}{}
    +	json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
    +	for k, v := range raw {
    +		// produces JSON bytes for each value
    +		data, err := json.Marshal(v)
    +		if err != nil {
    +			return err
    +		}
    +		var m Model
    +		json.NewDecoder(bytes.NewReader(data)).Decode(&m)
    +		l.Put(k, m)
    +	}
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
    new file mode 100644
    index 0000000..04fff2c
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
    @@ -0,0 +1,66 @@
    +package swagger
    +
    +import (
    +	"reflect"
    +	"strings"
    +)
    +
    +func (prop *ModelProperty) setDescription(field reflect.StructField) {
    +	if tag := field.Tag.Get("description"); tag != "" {
    +		prop.Description = tag
    +	}
    +}
    +
    +func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
    +	if tag := field.Tag.Get("default"); tag != "" {
    +		prop.DefaultValue = Special(tag)
    +	}
    +}
    +
    +func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
    +	// We use | to separate the enum values.  This value is chosen
+	// since it's unlikely to be useful in actual enumeration values.
    +	if tag := field.Tag.Get("enum"); tag != "" {
    +		prop.Enum = strings.Split(tag, "|")
    +	}
    +}
    +
    +func (prop *ModelProperty) setMaximum(field reflect.StructField) {
    +	if tag := field.Tag.Get("maximum"); tag != "" {
    +		prop.Maximum = tag
    +	}
    +}
    +
    +func (prop *ModelProperty) setType(field reflect.StructField) {
    +	if tag := field.Tag.Get("type"); tag != "" {
    +		prop.Type = &tag
    +	}
    +}
    +
    +func (prop *ModelProperty) setMinimum(field reflect.StructField) {
    +	if tag := field.Tag.Get("minimum"); tag != "" {
    +		prop.Minimum = tag
    +	}
    +}
    +
    +func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
    +	tag := field.Tag.Get("unique")
    +	switch tag {
    +	case "true":
    +		v := true
    +		prop.UniqueItems = &v
    +	case "false":
    +		v := false
    +		prop.UniqueItems = &v
    +	}
    +}
    +
    +func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
    +	prop.setDescription(field)
    +	prop.setEnumValues(field)
    +	prop.setMinimum(field)
    +	prop.setMaximum(field)
    +	prop.setUniqueItems(field)
    +	prop.setDefaultValue(field)
    +	prop.setType(field)
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go
    new file mode 100644
    index 0000000..3babb19
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/model_property_list.go
    @@ -0,0 +1,87 @@
    +package swagger
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +)
    +
    +// NamedModelProperty associates a name to a ModelProperty
    +type NamedModelProperty struct {
    +	Name     string
    +	Property ModelProperty
    +}
    +
    +// ModelPropertyList encapsulates a list of NamedModelProperty (association)
    +type ModelPropertyList struct {
    +	List []NamedModelProperty
    +}
    +
+// At returns the ModelProperty by its name unless absent, then ok is false
    +func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
    +	for _, each := range l.List {
    +		if each.Name == name {
    +			return each.Property, true
    +		}
    +	}
    +	return p, false
    +}
    +
    +// Put adds or replaces a ModelProperty with this name
    +func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
    +	// maybe replace existing
    +	for i, each := range l.List {
    +		if each.Name == name {
    +			// replace
    +			l.List[i] = NamedModelProperty{Name: name, Property: prop}
    +			return
    +		}
    +	}
    +	// add
    +	l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
    +}
    +
    +// Do enumerates all the properties, each with its assigned name
    +func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
    +	for _, each := range l.List {
    +		block(each.Name, each.Property)
    +	}
    +}
    +
    +// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
    +func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
    +	var buf bytes.Buffer
    +	encoder := json.NewEncoder(&buf)
    +	buf.WriteString("{\n")
    +	for i, each := range l.List {
    +		buf.WriteString("\"")
    +		buf.WriteString(each.Name)
    +		buf.WriteString("\": ")
    +		encoder.Encode(each.Property)
    +		if i < len(l.List)-1 {
    +			buf.WriteString(",\n")
    +		}
    +	}
    +	buf.WriteString("}")
    +	return buf.Bytes(), nil
    +}
    +
    +// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
    +func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
    +	raw := map[string]interface{}{}
    +	json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
    +	for k, v := range raw {
    +		// produces JSON bytes for each value
    +		data, err := json.Marshal(v)
    +		if err != nil {
    +			return err
    +		}
    +		var m ModelProperty
    +		json.NewDecoder(bytes.NewReader(data)).Decode(&m)
    +		l.Put(k, m)
    +	}
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go
    new file mode 100644
    index 0000000..b33ccfb
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go
    @@ -0,0 +1,36 @@
    +package swagger
    +
    +// Copyright 2015 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import "github.com/emicklei/go-restful"
    +
    +type orderedRouteMap struct {
    +	elements map[string][]restful.Route
    +	keys     []string
    +}
    +
    +func newOrderedRouteMap() *orderedRouteMap {
    +	return &orderedRouteMap{
    +		elements: map[string][]restful.Route{},
    +		keys:     []string{},
    +	}
    +}
    +
    +func (o *orderedRouteMap) Add(key string, route restful.Route) {
    +	routes, ok := o.elements[key]
    +	if ok {
    +		routes = append(routes, route)
    +		o.elements[key] = routes
    +		return
    +	}
    +	o.elements[key] = []restful.Route{route}
    +	o.keys = append(o.keys, key)
    +}
    +
    +func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
    +	for _, k := range o.keys {
    +		block(k, o.elements[k])
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger.go
    new file mode 100644
    index 0000000..9c40833
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger.go
    @@ -0,0 +1,185 @@
    +// Package swagger implements the structures of the Swagger
    +// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
    +package swagger
    +
    +const swaggerVersion = "1.2"
    +
    +// 4.3.3 Data Type Fields
    +type DataTypeFields struct {
    +	Type         *string  `json:"type,omitempty"` // if Ref not used
    +	Ref          *string  `json:"$ref,omitempty"` // if Type not used
    +	Format       string   `json:"format,omitempty"`
    +	DefaultValue Special  `json:"defaultValue,omitempty"`
    +	Enum         []string `json:"enum,omitempty"`
    +	Minimum      string   `json:"minimum,omitempty"`
    +	Maximum      string   `json:"maximum,omitempty"`
    +	Items        *Item    `json:"items,omitempty"`
    +	UniqueItems  *bool    `json:"uniqueItems,omitempty"`
    +}
    +
    +type Special string
    +
    +// 4.3.4 Items Object
    +type Item struct {
    +	Type   *string `json:"type,omitempty"`
    +	Ref    *string `json:"$ref,omitempty"`
    +	Format string  `json:"format,omitempty"`
    +}
    +
    +// 5.1 Resource Listing
    +type ResourceListing struct {
    +	SwaggerVersion string          `json:"swaggerVersion"` // e.g 1.2
    +	Apis           []Resource      `json:"apis"`
    +	ApiVersion     string          `json:"apiVersion"`
    +	Info           Info            `json:"info"`
    +	Authorizations []Authorization `json:"authorizations,omitempty"`
    +}
    +
    +// 5.1.2 Resource Object
    +type Resource struct {
    +	Path        string `json:"path"` // relative or absolute, must start with /
    +	Description string `json:"description"`
    +}
    +
    +// 5.1.3 Info Object
    +type Info struct {
    +	Title             string `json:"title"`
    +	Description       string `json:"description"`
    +	TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
    +	Contact           string `json:"contact,omitempty"`
    +	License           string `json:"license,omitempty"`
    +	LicenseUrl        string `json:"licenseUrl,omitempty"`
    +}
    +
    +// 5.1.5
    +type Authorization struct {
    +	Type       string      `json:"type"`
    +	PassAs     string      `json:"passAs"`
    +	Keyname    string      `json:"keyname"`
    +	Scopes     []Scope     `json:"scopes"`
    +	GrantTypes []GrantType `json:"grandTypes"`
    +}
    +
    +// 5.1.6, 5.2.11
    +type Scope struct {
    +	// Required. The name of the scope.
    +	Scope string `json:"scope"`
    +	// Recommended. A short description of the scope.
    +	Description string `json:"description"`
    +}
    +
    +// 5.1.7
    +type GrantType struct {
    +	Implicit          Implicit          `json:"implicit"`
    +	AuthorizationCode AuthorizationCode `json:"authorization_code"`
    +}
    +
    +// 5.1.8 Implicit Object
    +type Implicit struct {
    +	// Required. The login endpoint definition.
    +	loginEndpoint LoginEndpoint `json:"loginEndpoint"`
    +	// An optional alternative name to standard "access_token" OAuth2 parameter.
    +	TokenName string `json:"tokenName"`
    +}
    +
    +// 5.1.9 Authorization Code Object
    +type AuthorizationCode struct {
    +	TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
    +	TokenEndpoint        TokenEndpoint        `json:"tokenEndpoint"`
    +}
    +
    +// 5.1.10 Login Endpoint Object
    +type LoginEndpoint struct {
    +	// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
    +	Url string `json:"url"`
    +}
    +
    +// 5.1.11 Token Request Endpoint Object
    +type TokenRequestEndpoint struct {
    +	// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
    +	Url string `json:"url"`
    +	// An optional alternative name to standard "client_id" OAuth2 parameter.
    +	ClientIdName string `json:"clientIdName"`
    +	// An optional alternative name to the standard "client_secret" OAuth2 parameter.
    +	ClientSecretName string `json:"clientSecretName"`
    +}
    +
    +// 5.1.12 Token Endpoint Object
    +type TokenEndpoint struct {
    +	// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
    +	Url string `json:"url"`
    +	// An optional alternative name to standard "access_token" OAuth2 parameter.
    +	TokenName string `json:"tokenName"`
    +}
    +
    +// 5.2 API Declaration
    +type ApiDeclaration struct {
    +	SwaggerVersion string          `json:"swaggerVersion"`
    +	ApiVersion     string          `json:"apiVersion"`
    +	BasePath       string          `json:"basePath"`
    +	ResourcePath   string          `json:"resourcePath"` // must start with /
    +	Info           Info            `json:"info"`
    +	Apis           []Api           `json:"apis,omitempty"`
    +	Models         ModelList       `json:"models,omitempty"`
    +	Produces       []string        `json:"produces,omitempty"`
    +	Consumes       []string        `json:"consumes,omitempty"`
    +	Authorizations []Authorization `json:"authorizations,omitempty"`
    +}
    +
    +// 5.2.2 API Object
    +type Api struct {
    +	Path        string      `json:"path"` // relative or absolute, must start with /
    +	Description string      `json:"description"`
    +	Operations  []Operation `json:"operations,omitempty"`
    +}
    +
    +// 5.2.3 Operation Object
    +type Operation struct {
    +	DataTypeFields
    +	Method           string            `json:"method"`
    +	Summary          string            `json:"summary,omitempty"`
    +	Notes            string            `json:"notes,omitempty"`
    +	Nickname         string            `json:"nickname"`
    +	Authorizations   []Authorization   `json:"authorizations,omitempty"`
    +	Parameters       []Parameter       `json:"parameters"`
    +	ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
    +	Produces         []string          `json:"produces,omitempty"`
    +	Consumes         []string          `json:"consumes,omitempty"`
    +	Deprecated       string            `json:"deprecated,omitempty"`
    +}
    +
    +// 5.2.4 Parameter Object
    +type Parameter struct {
    +	DataTypeFields
    +	ParamType     string `json:"paramType"` // path,query,body,header,form
    +	Name          string `json:"name"`
    +	Description   string `json:"description"`
    +	Required      bool   `json:"required"`
    +	AllowMultiple bool   `json:"allowMultiple"`
    +}
    +
    +// 5.2.5 Response Message Object
    +type ResponseMessage struct {
    +	Code          int    `json:"code"`
    +	Message       string `json:"message"`
    +	ResponseModel string `json:"responseModel,omitempty"`
    +}
    +
    +// 5.2.6, 5.2.7 Models Object
    +type Model struct {
    +	Id            string            `json:"id"`
    +	Description   string            `json:"description,omitempty"`
    +	Required      []string          `json:"required,omitempty"`
    +	Properties    ModelPropertyList `json:"properties"`
    +	SubTypes      []string          `json:"subTypes,omitempty"`
    +	Discriminator string            `json:"discriminator,omitempty"`
    +}
    +
    +// 5.2.8 Properties Object
    +type ModelProperty struct {
    +	DataTypeFields
    +	Description string `json:"description,omitempty"`
    +}
    +
    +// 5.2.10
    +type Authorizations map[string]Authorization
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go
    new file mode 100644
    index 0000000..05a3c7e
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go
    @@ -0,0 +1,21 @@
    +package swagger
    +
    +type SwaggerBuilder struct {
    +	SwaggerService
    +}
    +
    +func NewSwaggerBuilder(config Config) *SwaggerBuilder {
    +	return &SwaggerBuilder{*newSwaggerService(config)}
    +}
    +
    +func (sb SwaggerBuilder) ProduceListing() ResourceListing {
    +	return sb.SwaggerService.produceListing()
    +}
    +
    +func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
    +	return sb.SwaggerService.produceAllDeclarations()
    +}
    +
    +func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
    +	return sb.SwaggerService.produceDeclarations(route)
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
    new file mode 100644
    index 0000000..58dd625
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
    @@ -0,0 +1,440 @@
    +package swagger
    +
    +import (
    +	"fmt"
    +
    +	"github.com/emicklei/go-restful"
    +	// "github.com/emicklei/hopwatch"
    +	"net/http"
    +	"reflect"
    +	"sort"
    +	"strings"
    +
    +	"github.com/emicklei/go-restful/log"
    +)
    +
    +type SwaggerService struct {
    +	config            Config
    +	apiDeclarationMap *ApiDeclarationList
    +}
    +
    +func newSwaggerService(config Config) *SwaggerService {
    +	sws := &SwaggerService{
    +		config:            config,
    +		apiDeclarationMap: new(ApiDeclarationList)}
    +
    +	// Build all ApiDeclarations
    +	for _, each := range config.WebServices {
    +		rootPath := each.RootPath()
    +		// skip the api service itself
    +		if rootPath != config.ApiPath {
    +			if rootPath == "" || rootPath == "/" {
    +				// use routes
    +				for _, route := range each.Routes() {
    +					entry := staticPathFromRoute(route)
    +					_, exists := sws.apiDeclarationMap.At(entry)
    +					if !exists {
    +						sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
    +					}
    +				}
    +			} else { // use root path
    +				sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
    +			}
    +		}
    +	}
    +
    +	// if specified then call the PostBuilderHandler
    +	if config.PostBuildHandler != nil {
    +		config.PostBuildHandler(sws.apiDeclarationMap)
    +	}
    +	return sws
    +}
    +
    +// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
    +var LogInfo = func(format string, v ...interface{}) {
    +	// use the restful package-wide logger
    +	log.Printf(format, v...)
    +}
    +
+// InstallSwaggerService adds the WebService that provides the API documentation of all services
+// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
    +func InstallSwaggerService(aSwaggerConfig Config) {
    +	RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
    +}
    +
+// RegisterSwaggerService adds the WebService that provides the API documentation of all services
+// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
    +func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
    +	sws := newSwaggerService(config)
    +	ws := new(restful.WebService)
    +	ws.Path(config.ApiPath)
    +	ws.Produces(restful.MIME_JSON)
    +	if config.DisableCORS {
    +		ws.Filter(enableCORS)
    +	}
    +	ws.Route(ws.GET("/").To(sws.getListing))
    +	ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
    +	ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
    +	ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
    +	ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
    +	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
    +	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
    +	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
    +	LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
    +	wsContainer.Add(ws)
    +
    +	// Check paths for UI serving
    +	if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
    +		swaggerPathSlash := config.SwaggerPath
    +		// path must end with slash /
    +		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
    +			LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
    +			swaggerPathSlash += "/"
    +		}
    +
    +		LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
    +		wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
    +
    +		//if we define a custom static handler use it
    +	} else if config.StaticHandler != nil && config.SwaggerPath != "" {
    +		swaggerPathSlash := config.SwaggerPath
    +		// path must end with slash /
    +		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
    +			LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
    +			swaggerPathSlash += "/"
    +
    +		}
    +		LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
    +		wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
    +
    +	} else {
    +		LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
    +	}
    +}
    +
    +func staticPathFromRoute(r restful.Route) string {
    +	static := r.Path
    +	bracket := strings.Index(static, "{")
    +	if bracket <= 1 { // result cannot be empty
    +		return static
    +	}
    +	if bracket != -1 {
    +		static = r.Path[:bracket]
    +	}
    +	if strings.HasSuffix(static, "/") {
    +		return static[:len(static)-1]
    +	} else {
    +		return static
    +	}
    +}
    +
    +func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
    +	if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
    +		// prevent duplicate header
    +		if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
    +			resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
    +		}
    +	}
    +	chain.ProcessFilter(req, resp)
    +}
    +
    +func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
    +	listing := sws.produceListing()
    +	resp.WriteAsJson(listing)
    +}
    +
    +func (sws SwaggerService) produceListing() ResourceListing {
    +	listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
    +	sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
    +		ref := Resource{Path: k}
    +		if len(v.Apis) > 0 { // use description of first (could still be empty)
    +			ref.Description = v.Apis[0].Description
    +		}
    +		listing.Apis = append(listing.Apis, ref)
    +	})
    +	return listing
    +}
    +
    +func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
    +	decl, ok := sws.produceDeclarations(composeRootPath(req))
    +	if !ok {
    +		resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
    +		return
    +	}
    +	// unless WebServicesUrl is given
    +	if len(sws.config.WebServicesUrl) == 0 {
    +		// update base path from the actual request
    +		// TODO how to detect https? assume http for now
    +		var host string
    +		// X-Forwarded-Host or Host or Request.Host
    +		hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
    +		if !ok || len(hostvalues) == 0 {
    +			forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
    +			if !ok || len(forwarded) == 0 {
    +				// fallback to Host field
    +				host = req.Request.Host
    +			} else {
    +				host = forwarded[0]
    +			}
    +		} else {
    +			host = hostvalues[0]
    +		}
    +		// inspect Referer for the scheme (http vs https)
    +		scheme := "http"
    +		if referer := req.Request.Header["Referer"]; len(referer) > 0 {
    +			if strings.HasPrefix(referer[0], "https") {
    +				scheme = "https"
    +			}
    +		}
    +		decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
    +	}
    +	resp.WriteAsJson(decl)
    +}
    +
    +func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
    +	decls := map[string]ApiDeclaration{}
    +	sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
    +		decls[k] = v
    +	})
    +	return decls
    +}
    +
    +func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
    +	decl, ok := sws.apiDeclarationMap.At(route)
    +	if !ok {
    +		return nil, false
    +	}
    +	decl.BasePath = sws.config.WebServicesUrl
    +	return &decl, true
    +}
    +
+// composeDeclaration uses all routes and parameters to create an ApiDeclaration
    +func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
    +	decl := ApiDeclaration{
    +		SwaggerVersion: swaggerVersion,
    +		BasePath:       sws.config.WebServicesUrl,
    +		ResourcePath:   pathPrefix,
    +		Models:         ModelList{},
    +		ApiVersion:     ws.Version()}
    +
    +	// collect any path parameters
    +	rootParams := []Parameter{}
    +	for _, param := range ws.PathParameters() {
    +		rootParams = append(rootParams, asSwaggerParameter(param.Data()))
    +	}
    +	// aggregate by path
    +	pathToRoutes := newOrderedRouteMap()
    +	for _, other := range ws.Routes() {
    +		if strings.HasPrefix(other.Path, pathPrefix) {
    +			pathToRoutes.Add(other.Path, other)
    +		}
    +	}
    +	pathToRoutes.Do(func(path string, routes []restful.Route) {
    +		api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
    +		voidString := "void"
    +		for _, route := range routes {
    +			operation := Operation{
    +				Method:  route.Method,
    +				Summary: route.Doc,
    +				Notes:   route.Notes,
    +				// Type gets overwritten if there is a write sample
    +				DataTypeFields:   DataTypeFields{Type: &voidString},
    +				Parameters:       []Parameter{},
    +				Nickname:         route.Operation,
    +				ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
    +
    +			operation.Consumes = route.Consumes
    +			operation.Produces = route.Produces
    +
    +			// share root params if any
    +			for _, swparam := range rootParams {
    +				operation.Parameters = append(operation.Parameters, swparam)
    +			}
    +			// route specific params
    +			for _, param := range route.ParameterDocs {
    +				operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
    +			}
    +
    +			sws.addModelsFromRouteTo(&operation, route, &decl)
    +			api.Operations = append(api.Operations, operation)
    +		}
    +		decl.Apis = append(decl.Apis, api)
    +	})
    +	return decl
    +}
    +
    +func withoutWildcard(path string) string {
    +	if strings.HasSuffix(path, ":*}") {
    +		return path[0:len(path)-3] + "}"
    +	}
    +	return path
    +}
    +
    +// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
    +func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
    +	if route.ResponseErrors == nil {
    +		return messages
    +	}
    +	// sort by code
    +	codes := sort.IntSlice{}
    +	for code, _ := range route.ResponseErrors {
    +		codes = append(codes, code)
    +	}
    +	codes.Sort()
    +	for _, code := range codes {
    +		each := route.ResponseErrors[code]
    +		message := ResponseMessage{
    +			Code:    code,
    +			Message: each.Message,
    +		}
    +		if each.Model != nil {
    +			st := reflect.TypeOf(each.Model)
    +			isCollection, st := detectCollectionType(st)
    +			modelName := modelBuilder{}.keyFrom(st)
    +			if isCollection {
    +				modelName = "array[" + modelName + "]"
    +			}
    +			modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
    +			// reference the model
    +			message.ResponseModel = modelName
    +		}
    +		messages = append(messages, message)
    +	}
    +	return
    +}
    +
+// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it.
    +func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
    +	if route.ReadSample != nil {
    +		sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
    +	}
    +	if route.WriteSample != nil {
    +		sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
    +	}
    +}
    +
    +func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
    +	isCollection := false
    +	if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
    +		st = st.Elem()
    +		isCollection = true
    +	} else {
    +		if st.Kind() == reflect.Ptr {
    +			if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
    +				st = st.Elem().Elem()
    +				isCollection = true
    +			}
    +		}
    +	}
    +	return isCollection, st
    +}
    +
    +// addModelFromSample creates and adds (or overwrites) a Model from a sample resource
    +func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
    +	if isResponse {
    +		type_, items := asDataType(sample, &sws.config)
    +		operation.Type = type_
    +		operation.Items = items
    +	}
    +	modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample)
    +}
    +
    +func asSwaggerParameter(param restful.ParameterData) Parameter {
    +	return Parameter{
    +		DataTypeFields: DataTypeFields{
    +			Type:         ¶m.DataType,
    +			Format:       asFormat(param.DataType, param.DataFormat),
    +			DefaultValue: Special(param.DefaultValue),
    +		},
    +		Name:        param.Name,
    +		Description: param.Description,
    +		ParamType:   asParamType(param.Kind),
    +
    +		Required: param.Required}
    +}
    +
+// Between 1 and 7 path parameters are supported
    +func composeRootPath(req *restful.Request) string {
    +	path := "/" + req.PathParameter("a")
    +	b := req.PathParameter("b")
    +	if b == "" {
    +		return path
    +	}
    +	path = path + "/" + b
    +	c := req.PathParameter("c")
    +	if c == "" {
    +		return path
    +	}
    +	path = path + "/" + c
    +	d := req.PathParameter("d")
    +	if d == "" {
    +		return path
    +	}
    +	path = path + "/" + d
    +	e := req.PathParameter("e")
    +	if e == "" {
    +		return path
    +	}
    +	path = path + "/" + e
    +	f := req.PathParameter("f")
    +	if f == "" {
    +		return path
    +	}
    +	path = path + "/" + f
    +	g := req.PathParameter("g")
    +	if g == "" {
    +		return path
    +	}
    +	return path + "/" + g
    +}
    +
    +func asFormat(dataType string, dataFormat string) string {
    +	if dataFormat != "" {
    +		return dataFormat
    +	}
    +	return "" // TODO
    +}
    +
    +func asParamType(kind int) string {
    +	switch {
    +	case kind == restful.PathParameterKind:
    +		return "path"
    +	case kind == restful.QueryParameterKind:
    +		return "query"
    +	case kind == restful.BodyParameterKind:
    +		return "body"
    +	case kind == restful.HeaderParameterKind:
    +		return "header"
    +	case kind == restful.FormParameterKind:
    +		return "form"
    +	}
    +	return ""
    +}
    +
    +func asDataType(any interface{}, config *Config) (*string, *Item) {
    +	// If it's not a collection, return the suggested model name
    +	st := reflect.TypeOf(any)
    +	isCollection, st := detectCollectionType(st)
    +	modelName := modelBuilder{}.keyFrom(st)
    +	// if it's not a collection we are done
    +	if !isCollection {
    +		return &modelName, nil
    +	}
    +
    +	// XXX: This is not very elegant
    +	// We create an Item object referring to the given model
    +	models := ModelList{}
    +	mb := modelBuilder{Models: &models, Config: config}
    +	mb.addModelFrom(any)
    +
    +	elemTypeName := mb.getElementTypeName(modelName, "", st)
    +	item := new(Item)
    +	if mb.isPrimitiveType(elemTypeName) {
    +		mapped := mb.jsonSchemaType(elemTypeName)
    +		item.Type = &mapped
    +	} else {
    +		item.Ref = &elemTypeName
    +	}
    +	tmp := "array"
    +	return &tmp, item
    +}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/web_service.go b/src/prometheus/vendor/github.com/emicklei/go-restful/web_service.go
    new file mode 100644
    index 0000000..2a51004
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/web_service.go
    @@ -0,0 +1,268 @@
    +package restful
    +
    +import (
    +	"errors"
    +	"os"
    +	"sync"
    +
    +	"github.com/emicklei/go-restful/log"
    +)
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
    +type WebService struct {
    +	rootPath       string
    +	pathExpr       *pathExpression // cached compilation of rootPath as RegExp
    +	routes         []Route
    +	produces       []string
    +	consumes       []string
    +	pathParameters []*Parameter
    +	filters        []FilterFunction
    +	documentation  string
    +	apiVersion     string
    +
    +	dynamicRoutes bool
    +
    +	// protects 'routes' if dynamic routes are enabled
    +	routesLock sync.RWMutex
    +}
    +
    +func (w *WebService) SetDynamicRoutes(enable bool) {
    +	w.dynamicRoutes = enable
    +}
    +
    +// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
    +func (w *WebService) compilePathExpression() {
    +	compiled, err := newPathExpression(w.rootPath)
    +	if err != nil {
    +		log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
    +		os.Exit(1)
    +	}
    +	w.pathExpr = compiled
    +}
    +
    +// ApiVersion sets the API version for documentation purposes.
    +func (w *WebService) ApiVersion(apiVersion string) *WebService {
    +	w.apiVersion = apiVersion
    +	return w
    +}
    +
    +// Version returns the API version for documentation purposes.
    +func (w *WebService) Version() string { return w.apiVersion }
    +
    +// Path specifies the root URL template path of the WebService.
    +// All Routes will be relative to this path.
    +func (w *WebService) Path(root string) *WebService {
    +	w.rootPath = root
    +	if len(w.rootPath) == 0 {
    +		w.rootPath = "/"
    +	}
    +	w.compilePathExpression()
    +	return w
    +}
    +
    +// Param adds a PathParameter to document parameters used in the root path.
    +func (w *WebService) Param(parameter *Parameter) *WebService {
    +	if w.pathParameters == nil {
    +		w.pathParameters = []*Parameter{}
    +	}
    +	w.pathParameters = append(w.pathParameters, parameter)
    +	return w
    +}
    +
    +// PathParameter creates a new Parameter of kind Path for documentation purposes.
    +// It is initialized as required with string as its DataType.
    +func (w *WebService) PathParameter(name, description string) *Parameter {
    +	return PathParameter(name, description)
    +}
    +
    +// PathParameter creates a new Parameter of kind Path for documentation purposes.
    +// It is initialized as required with string as its DataType.
    +func PathParameter(name, description string) *Parameter {
    +	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
    +	p.bePath()
    +	return p
    +}
    +
    +// QueryParameter creates a new Parameter of kind Query for documentation purposes.
    +// It is initialized as not required with string as its DataType.
    +func (w *WebService) QueryParameter(name, description string) *Parameter {
    +	return QueryParameter(name, description)
    +}
    +
    +// QueryParameter creates a new Parameter of kind Query for documentation purposes.
    +// It is initialized as not required with string as its DataType.
    +func QueryParameter(name, description string) *Parameter {
    +	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
    +	p.beQuery()
    +	return p
    +}
    +
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+// This method simply delegates to the package-level BodyParameter function.
+func (w *WebService) BodyParameter(name, description string) *Parameter {
+	return BodyParameter(name, description)
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func BodyParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
+	p.beBody()
+	return p
+}
    +
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+// This method simply delegates to the package-level HeaderParameter function.
+func (w *WebService) HeaderParameter(name, description string) *Parameter {
+	return HeaderParameter(name, description)
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func HeaderParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+	p.beHeader()
+	return p
+}
    +
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as not required with string as its DataType.
+// (The original comment said "required", but the constructor below sets Required: false.)
+func (w *WebService) FormParameter(name, description string) *Parameter {
+	return FormParameter(name, description)
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func FormParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+	p.beForm()
+	return p
+}
    +
+// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes.
+// The builder inherits this WebService's produces/consumes defaults for values it did not set.
+func (w *WebService) Route(builder *RouteBuilder) *WebService {
+	w.routesLock.Lock()
+	defer w.routesLock.Unlock()
+	builder.copyDefaults(w.produces, w.consumes)
+	w.routes = append(w.routes, builder.Build())
+	return w
+}
    +
    +// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'
    +func (w *WebService) RemoveRoute(path, method string) error {
    +	if !w.dynamicRoutes {
    +		return errors.New("dynamic routes are not enabled.")
    +	}
    +	w.routesLock.Lock()
    +	defer w.routesLock.Unlock()
    +	newRoutes := make([]Route, (len(w.routes) - 1))
    +	current := 0
    +	for ix := range w.routes {
    +		if w.routes[ix].Method == method && w.routes[ix].Path == path {
    +			continue
    +		}
    +		newRoutes[current] = w.routes[ix]
    +		current = current + 1
    +	}
    +	w.routes = newRoutes
    +	return nil
    +}
    +
+// Method creates a new RouteBuilder rooted at this service's path and initializes its http method.
+func (w *WebService) Method(httpMethod string) *RouteBuilder {
+	return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
+}
    +
+// Produces specifies that this WebService can produce one or more MIME types.
+// Http requests must have one of these values set for the Accept header.
+// Note: this replaces (not appends to) the current list.
+func (w *WebService) Produces(contentTypes ...string) *WebService {
+	w.produces = contentTypes
+	return w
+}
+
+// Consumes specifies that this WebService can consume one or more MIME types.
+// Http requests must have one of these values set for the Content-Type header.
+// Note: this replaces (not appends to) the current list.
+func (w *WebService) Consumes(accepts ...string) *WebService {
+	w.consumes = accepts
+	return w
+}
    +
+// Routes returns the Routes associated with this WebService.
+// NOTE(review): when dynamic routes are disabled the internal slice is
+// returned without locking — presumably routes are then immutable after
+// startup; confirm before mutating the result.
+func (w *WebService) Routes() []Route {
+	if !w.dynamicRoutes {
+		return w.routes
+	}
+	// Make a copy of the slice to prevent concurrency problems
+	w.routesLock.RLock()
+	defer w.routesLock.RUnlock()
+	result := make([]Route, len(w.routes))
+	for ix := range w.routes {
+		result[ix] = w.routes[ix]
+	}
+	return result
+}
    +
+// RootPath returns the RootPath associated with this WebService. Default "/"
+func (w *WebService) RootPath() string {
+	return w.rootPath
+}
+
+// PathParameters returns the path parameters shared among its Routes.
+func (w *WebService) PathParameters() []*Parameter {
+	return w.pathParameters
+}
    +
+// Filter adds a filter function to the chain of filters applicable to all its Routes.
+func (w *WebService) Filter(filter FilterFunction) *WebService {
+	w.filters = append(w.filters, filter)
+	return w
+}
+
+// Doc is used to set the documentation of this service.
+func (w *WebService) Doc(plainText string) *WebService {
+	w.documentation = plainText
+	return w
+}
+
+// Documentation returns the documentation text set via Doc.
+func (w *WebService) Documentation() string {
+	return w.documentation
+}
    +
+/*
+	Convenience methods
+*/
+
+// Each shortcut below is equivalent to w.Method(verb).Path(subPath).
+
+// HEAD is a shortcut for .Method("HEAD").Path(subPath)
+func (w *WebService) HEAD(subPath string) *RouteBuilder {
+	return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
+}
+
+// GET is a shortcut for .Method("GET").Path(subPath)
+func (w *WebService) GET(subPath string) *RouteBuilder {
+	return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
+}
+
+// POST is a shortcut for .Method("POST").Path(subPath)
+func (w *WebService) POST(subPath string) *RouteBuilder {
+	return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
+}
+
+// PUT is a shortcut for .Method("PUT").Path(subPath)
+func (w *WebService) PUT(subPath string) *RouteBuilder {
+	return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
+}
+
+// PATCH is a shortcut for .Method("PATCH").Path(subPath)
+func (w *WebService) PATCH(subPath string) *RouteBuilder {
+	return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
+}
+
+// DELETE is a shortcut for .Method("DELETE").Path(subPath)
+func (w *WebService) DELETE(subPath string) *RouteBuilder {
+	return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
+}
    diff --git a/src/prometheus/vendor/github.com/emicklei/go-restful/web_service_container.go b/src/prometheus/vendor/github.com/emicklei/go-restful/web_service_container.go
    new file mode 100644
    index 0000000..c9d31b0
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/emicklei/go-restful/web_service_container.go
    @@ -0,0 +1,39 @@
    +package restful
    +
    +// Copyright 2013 Ernest Micklei. All rights reserved.
    +// Use of this source code is governed by a license
    +// that can be found in the LICENSE file.
    +
    +import (
    +	"net/http"
    +)
    +
+// DefaultContainer is a restful.Container that uses http.DefaultServeMux
+var DefaultContainer *Container
+
+// init wires the DefaultContainer to the standard library's default mux.
+func init() {
+	DefaultContainer = NewContainer()
+	DefaultContainer.ServeMux = http.DefaultServeMux
+}
    +
+// If set to true then panics will not be caught to return HTTP 500.
+// In that case, Route functions are responsible for handling any error situation.
+// Default value is false = recover from panics. This has performance implications.
+// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
+var DoNotRecover = false
    +
+// Add registers a new WebService and adds it to the DefaultContainer.
+func Add(service *WebService) {
+	DefaultContainer.Add(service)
+}
+
+// Filter appends a container FilterFunction to the DefaultContainer.
+// These are called before dispatching a http.Request to a WebService.
+func Filter(filter FilterFunction) {
+	DefaultContainer.Filter(filter)
+}
+
+// RegisteredWebServices returns the collection of WebServices from the DefaultContainer.
+func RegisteredWebServices() []*WebService {
+	return DefaultContainer.RegisteredWebServices()
+}
    diff --git a/src/prometheus/vendor/github.com/ghodss/yaml/LICENSE b/src/prometheus/vendor/github.com/ghodss/yaml/LICENSE
    new file mode 100644
    index 0000000..7805d36
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/ghodss/yaml/LICENSE
    @@ -0,0 +1,50 @@
    +The MIT License (MIT)
    +
    +Copyright (c) 2014 Sam Ghods
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
    +
    +Copyright (c) 2012 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    diff --git a/src/prometheus/vendor/github.com/ghodss/yaml/fields.go b/src/prometheus/vendor/github.com/ghodss/yaml/fields.go
    new file mode 100644
    index 0000000..0bd3c2b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/ghodss/yaml/fields.go
    @@ -0,0 +1,497 @@
    +// Copyright 2013 The Go Authors. All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +package yaml
    +
    +import (
    +	"bytes"
    +	"encoding"
    +	"encoding/json"
    +	"reflect"
    +	"sort"
    +	"strings"
    +	"sync"
    +	"unicode"
    +	"unicode/utf8"
    +)
    +
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	// Walk through pointers and interfaces until a non-pointer (or an
+	// unmarshaler implementation) is reached.
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		// Allocate through nil pointers so the walk can continue.
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		// Prefer an unmarshaler implemented on the pointer type.
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
    +
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool  // name came from a json tag
+	index     []int // index sequence into nested structs
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+// fillField populates the derived members (nameBytes, equalFold) of f.
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
    +
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	// Shallower fields (shorter index path) sort first.
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	// Tagged fields win over untagged ones at the same depth.
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
    +
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				// Extend the index path with this field's position.
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    opts.Contains("string"),
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
    +
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
    +
+// fieldCache memoizes typeFields results per reflect.Type.
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		// Cache a non-nil sentinel so the nil check above works.
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
    +
+// isValidTag reports whether s is usable as a JSON field name:
+// non-empty, containing only letters, digits and allowed punctuation.
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
    +
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'    // Kelvin sign; case-folds with 'k'/'K'.
+	smallLongEss = '\u017f'    // Latin small letter long s; folds with 's'/'S'.
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'Å¿' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
    +
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				// ASCII-only case-insensitive comparison.
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	// t must be fully consumed for the strings to be equal.
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
    +
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		// Letters compare case-insensitively; everything else must match exactly.
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
    +
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		// Peel off one comma-separated option per iteration.
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
    diff --git a/src/prometheus/vendor/github.com/ghodss/yaml/yaml.go b/src/prometheus/vendor/github.com/ghodss/yaml/yaml.go
    new file mode 100644
    index 0000000..c02beac
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/ghodss/yaml/yaml.go
    @@ -0,0 +1,277 @@
    +package yaml
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"fmt"
    +	"reflect"
    +	"strconv"
    +
    +	"gopkg.in/yaml.v2"
    +)
    +
    +// Marshals the object into JSON then converts JSON to YAML and returns the
    +// YAML.
    +func Marshal(o interface{}) ([]byte, error) {
    +	j, err := json.Marshal(o)
    +	if err != nil {
    +		return nil, fmt.Errorf("error marshaling into JSON: ", err)
    +	}
    +
    +	y, err := JSONToYAML(j)
    +	if err != nil {
    +		return nil, fmt.Errorf("error converting JSON to YAML: ", err)
    +	}
    +
    +	return y, nil
    +}
    +
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+// The reflect.Value of o is threaded through yamlToJSON so string-typed
+// targets can coerce YAML numbers/booleans to strings.
+func Unmarshal(y []byte, o interface{}) error {
+	vo := reflect.ValueOf(o)
+	j, err := yamlToJSON(y, &vo)
+	if err != nil {
+		return fmt.Errorf("error converting YAML to JSON: %v", err)
+	}
+
+	err = json.Unmarshal(j, o)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling JSON: %v", err)
+	}
+
+	return nil
+}
    +
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+	// Convert the JSON to an object.
+	var jsonObj interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshaling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
    +
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+//   in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+//   use binary data with this library, encode the data as base64 as usual but do
+//   not use the !!binary tag in your YAML. This will ensure the original base64
+//   encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil)
+}
+
+// yamlToJSON converts YAML to JSON; jsonTarget, when non-nil, is the eventual
+// unmarshal destination and is used to coerce keys/values to strings.
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yaml.Unmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
    +
    +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
    +	var err error
    +
    +	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
    +	// interface). We pass decodingNull as false because we're not actually
    +	// decoding into the value, we're just checking if the ultimate target is a
    +	// string.
    +	if jsonTarget != nil {
    +		ju, tu, pv := indirect(*jsonTarget, false)
    +		// We have a JSON or Text Umarshaler at this level, so we can't be trying
    +		// to decode into a string.
    +		if ju != nil || tu != nil {
    +			jsonTarget = nil
    +		} else {
    +			jsonTarget = &pv
    +		}
    +	}
    +
    +	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
    +	// if so, coerce.  Else return normal.
    +	// If yamlObj is a map or array, find the field that each key is
    +	// unmarshaling to, and when you recurse pass the reflect.Value for that
    +	// field back into this function.
    +	switch typedYAMLObj := yamlObj.(type) {
    +	case map[interface{}]interface{}:
    +		// JSON does not support arbitrary keys in a map, so we must convert
    +		// these keys to strings.
    +		//
    +		// From my reading of go-yaml v2 (specifically the resolve function),
    +		// keys can only have the types string, int, int64, float64, binary
    +		// (unsupported), or null (unsupported).
    +		strMap := make(map[string]interface{})
    +		for k, v := range typedYAMLObj {
    +			// Resolve the key to a string first.
    +			var keyString string
    +			switch typedKey := k.(type) {
    +			case string:
    +				keyString = typedKey
    +			case int:
    +				keyString = strconv.Itoa(typedKey)
    +			case int64:
    +				// go-yaml will only return an int64 as a key if the system
    +				// architecture is 32-bit and the key's value is between 32-bit
    +				// and 64-bit. Otherwise the key type will simply be int.
    +				keyString = strconv.FormatInt(typedKey, 10)
    +			case float64:
    +				// Stolen from go-yaml to use the same conversion to string as
    +				// the go-yaml library uses to convert float to string when
    +				// Marshaling.
    +				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
    +				switch s {
    +				case "+Inf":
    +					s = ".inf"
    +				case "-Inf":
    +					s = "-.inf"
    +				case "NaN":
    +					s = ".nan"
    +				}
    +				keyString = s
    +			case bool:
    +				if typedKey {
    +					keyString = "true"
    +				} else {
    +					keyString = "false"
    +				}
    +			default:
    +				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
    +					reflect.TypeOf(k), k, v)
    +			}
    +
    +			// jsonTarget should be a struct or a map. If it's a struct, find
    +			// the field it's going to map to and pass its reflect.Value. If
    +			// it's a map, find the element type of the map and pass the
    +			// reflect.Value created from that type. If it's neither, just pass
    +			// nil - JSON conversion will error for us if it's a real issue.
    +			if jsonTarget != nil {
    +				t := *jsonTarget
    +				if t.Kind() == reflect.Struct {
    +					keyBytes := []byte(keyString)
    +					// Find the field that the JSON library would use.
    +					var f *field
    +					fields := cachedTypeFields(t.Type())
    +					for i := range fields {
    +						ff := &fields[i]
    +						if bytes.Equal(ff.nameBytes, keyBytes) {
    +							f = ff
    +							break
    +						}
    +						// Do case-insensitive comparison.
    +						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
    +							f = ff
    +						}
    +					}
    +					if f != nil {
    +						// Find the reflect.Value of the most preferential
    +						// struct field.
    +						jtf := t.Field(f.index[0])
    +						strMap[keyString], err = convertToJSONableObject(v, &jtf)
    +						if err != nil {
    +							return nil, err
    +						}
    +						continue
    +					}
    +				} else if t.Kind() == reflect.Map {
    +					// Create a zero value of the map's element type to use as
    +					// the JSON target.
    +					jtv := reflect.Zero(t.Type().Elem())
    +					strMap[keyString], err = convertToJSONableObject(v, &jtv)
    +					if err != nil {
    +						return nil, err
    +					}
    +					continue
    +				}
    +			}
    +			strMap[keyString], err = convertToJSONableObject(v, nil)
    +			if err != nil {
    +				return nil, err
    +			}
    +		}
    +		return strMap, nil
    +	case []interface{}:
    +		// We need to recurse into arrays in case there are any
    +		// map[interface{}]interface{}'s inside and to convert any
    +		// numbers to strings.
    +
    +		// If jsonTarget is a slice (which it really should be), find the
    +		// thing it's going to map to. If it's not a slice, just pass nil
    +		// - JSON conversion will error for us if it's a real issue.
    +		var jsonSliceElemValue *reflect.Value
    +		if jsonTarget != nil {
    +			t := *jsonTarget
    +			if t.Kind() == reflect.Slice {
    +				// By default slices point to nil, but we need a reflect.Value
    +				// pointing to a value of the slice type, so we create one here.
    +				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
    +				jsonSliceElemValue = &ev
    +			}
    +		}
    +
    +		// Make and use a new array.
    +		arr := make([]interface{}, len(typedYAMLObj))
    +		for i, v := range typedYAMLObj {
    +			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
    +			if err != nil {
    +				return nil, err
    +			}
    +		}
    +		return arr, nil
    +	default:
    +		// If the target type is a string and the YAML type is a number,
    +		// convert the YAML type to a string.
    +		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
    +			// Based on my reading of go-yaml, it may return int, int64,
    +			// float64, or uint64.
    +			var s string
    +			switch typedVal := typedYAMLObj.(type) {
    +			case int:
    +				s = strconv.FormatInt(int64(typedVal), 10)
    +			case int64:
    +				s = strconv.FormatInt(typedVal, 10)
    +			case float64:
    +				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
    +			case uint64:
    +				s = strconv.FormatUint(typedVal, 10)
    +			case bool:
    +				if typedVal {
    +					s = "true"
    +				} else {
    +					s = "false"
    +				}
    +			}
    +			if len(s) > 0 {
    +				yamlObj = interface{}(s)
    +			}
    +		}
    +		return yamlObj, nil
    +	}
    +
    +	return nil, nil
    +}
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/LICENSE b/src/prometheus/vendor/github.com/go-ini/ini/LICENSE
    new file mode 100644
    index 0000000..37ec93a
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/LICENSE
    @@ -0,0 +1,191 @@
    +Apache License
    +Version 2.0, January 2004
    +http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +"License" shall mean the terms and conditions for use, reproduction, and
    +distribution as defined by Sections 1 through 9 of this document.
    +
    +"Licensor" shall mean the copyright owner or entity authorized by the copyright
    +owner that is granting the License.
    +
    +"Legal Entity" shall mean the union of the acting entity and all other entities
    +that control, are controlled by, or are under common control with that entity.
    +For the purposes of this definition, "control" means (i) the power, direct or
    +indirect, to cause the direction or management of such entity, whether by
    +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +"You" (or "Your") shall mean an individual or Legal Entity exercising
    +permissions granted by this License.
    +
    +"Source" form shall mean the preferred form for making modifications, including
    +but not limited to software source code, documentation source, and configuration
    +files.
    +
    +"Object" form shall mean any form resulting from mechanical transformation or
    +translation of a Source form, including but not limited to compiled object code,
    +generated documentation, and conversions to other media types.
    +
    +"Work" shall mean the work of authorship, whether in Source or Object form, made
    +available under the License, as indicated by a copyright notice that is included
    +in or attached to the work (an example is provided in the Appendix below).
    +
    +"Derivative Works" shall mean any work, whether in Source or Object form, that
    +is based on (or derived from) the Work and for which the editorial revisions,
    +annotations, elaborations, or other modifications represent, as a whole, an
    +original work of authorship. For the purposes of this License, Derivative Works
    +shall not include works that remain separable from, or merely link (or bind by
    +name) to the interfaces of, the Work and Derivative Works thereof.
    +
    +"Contribution" shall mean any work of authorship, including the original version
    +of the Work and any modifications or additions to that Work or Derivative Works
    +thereof, that is intentionally submitted to Licensor for inclusion in the Work
    +by the copyright owner or by an individual or Legal Entity authorized to submit
    +on behalf of the copyright owner. For the purposes of this definition,
    +"submitted" means any form of electronic, verbal, or written communication sent
    +to the Licensor or its representatives, including but not limited to
    +communication on electronic mailing lists, source code control systems, and
    +issue tracking systems that are managed by, or on behalf of, the Licensor for
    +the purpose of discussing and improving the Work, but excluding communication
    +that is conspicuously marked or otherwise designated in writing by the copyright
    +owner as "Not a Contribution."
    +
    +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
    +of whom a Contribution has been received by Licensor and subsequently
    +incorporated within the Work.
    +
    +2. Grant of Copyright License.
    +
    +Subject to the terms and conditions of this License, each Contributor hereby
    +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
    +irrevocable copyright license to reproduce, prepare Derivative Works of,
    +publicly display, publicly perform, sublicense, and distribute the Work and such
    +Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License.
    +
    +Subject to the terms and conditions of this License, each Contributor hereby
    +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
    +irrevocable (except as stated in this section) patent license to make, have
    +made, use, offer to sell, sell, import, and otherwise transfer the Work, where
    +such license applies only to those patent claims licensable by such Contributor
    +that are necessarily infringed by their Contribution(s) alone or by combination
    +of their Contribution(s) with the Work to which such Contribution(s) was
    +submitted. If You institute patent litigation against any entity (including a
    +cross-claim or counterclaim in a lawsuit) alleging that the Work or a
    +Contribution incorporated within the Work constitutes direct or contributory
    +patent infringement, then any patent licenses granted to You under this License
    +for that Work shall terminate as of the date such litigation is filed.
    +
    +4. Redistribution.
    +
    +You may reproduce and distribute copies of the Work or Derivative Works thereof
    +in any medium, with or without modifications, and in Source or Object form,
    +provided that You meet the following conditions:
    +
    +You must give any other recipients of the Work or Derivative Works a copy of
    +this License; and
    +You must cause any modified files to carry prominent notices stating that You
    +changed the files; and
    +You must retain, in the Source form of any Derivative Works that You distribute,
    +all copyright, patent, trademark, and attribution notices from the Source form
    +of the Work, excluding those notices that do not pertain to any part of the
    +Derivative Works; and
    +If the Work includes a "NOTICE" text file as part of its distribution, then any
    +Derivative Works that You distribute must include a readable copy of the
    +attribution notices contained within such NOTICE file, excluding those notices
    +that do not pertain to any part of the Derivative Works, in at least one of the
    +following places: within a NOTICE text file distributed as part of the
    +Derivative Works; within the Source form or documentation, if provided along
    +with the Derivative Works; or, within a display generated by the Derivative
    +Works, if and wherever such third-party notices normally appear. The contents of
    +the NOTICE file are for informational purposes only and do not modify the
    +License. You may add Your own attribution notices within Derivative Works that
    +You distribute, alongside or as an addendum to the NOTICE text from the Work,
    +provided that such additional attribution notices cannot be construed as
    +modifying the License.
    +You may add Your own copyright statement to Your modifications and may provide
    +additional or different license terms and conditions for use, reproduction, or
    +distribution of Your modifications, or for any such Derivative Works as a whole,
    +provided Your use, reproduction, and distribution of the Work otherwise complies
    +with the conditions stated in this License.
    +
    +5. Submission of Contributions.
    +
    +Unless You explicitly state otherwise, any Contribution intentionally submitted
    +for inclusion in the Work by You to the Licensor shall be under the terms and
    +conditions of this License, without any additional terms or conditions.
    +Notwithstanding the above, nothing herein shall supersede or modify the terms of
    +any separate license agreement you may have executed with Licensor regarding
    +such Contributions.
    +
    +6. Trademarks.
    +
    +This License does not grant permission to use the trade names, trademarks,
    +service marks, or product names of the Licensor, except as required for
    +reasonable and customary use in describing the origin of the Work and
    +reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty.
    +
    +Unless required by applicable law or agreed to in writing, Licensor provides the
    +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
    +including, without limitation, any warranties or conditions of TITLE,
    +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
    +solely responsible for determining the appropriateness of using or
    +redistributing the Work and assume any risks associated with Your exercise of
    +permissions under this License.
    +
    +8. Limitation of Liability.
    +
    +In no event and under no legal theory, whether in tort (including negligence),
    +contract, or otherwise, unless required by applicable law (such as deliberate
    +and grossly negligent acts) or agreed to in writing, shall any Contributor be
    +liable to You for damages, including any direct, indirect, special, incidental,
    +or consequential damages of any character arising as a result of this License or
    +out of the use or inability to use the Work (including but not limited to
    +damages for loss of goodwill, work stoppage, computer failure or malfunction, or
    +any and all other commercial damages or losses), even if such Contributor has
    +been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability.
    +
    +While redistributing the Work or Derivative Works thereof, You may choose to
    +offer, and charge a fee for, acceptance of support, warranty, indemnity, or
    +other liability obligations and/or rights consistent with this License. However,
    +in accepting such obligations, You may act only on Your own behalf and on Your
    +sole responsibility, not on behalf of any other Contributor, and only if You
    +agree to indemnify, defend, and hold each Contributor harmless for any liability
    +incurred by, or claims asserted against, such Contributor by reason of your
    +accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work
    +
    +To apply the Apache License to your work, attach the following boilerplate
    +notice, with the fields enclosed by brackets "[]" replaced with your own
    +identifying information. (Don't include the brackets!) The text should be
    +enclosed in the appropriate comment syntax for the file format. We also
    +recommend that a file or class name and description of purpose be included on
    +the same "printed page" as the copyright notice for easier identification within
    +third-party archives.
    +
    +   Copyright [yyyy] [name of copyright owner]
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +     http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/Makefile b/src/prometheus/vendor/github.com/go-ini/ini/Makefile
    new file mode 100644
    index 0000000..ac034e5
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/Makefile
    @@ -0,0 +1,12 @@
    +.PHONY: build test bench vet
    +
    +build: vet bench
    +
    +test:
    +	go test -v -cover -race
    +
    +bench:
    +	go test -v -cover -race -test.bench=. -test.benchmem
    +
    +vet:
    +	go vet
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/README.md b/src/prometheus/vendor/github.com/go-ini/ini/README.md
    new file mode 100644
    index 0000000..a939d75
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/README.md
    @@ -0,0 +1,703 @@
    +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
    +===
    +
    +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
    +
    +Package ini provides INI file read and write functionality in Go.
    +
    +[简体中文](README_ZH.md)
    +
    +## Feature
    +
    +- Load multiple data sources(`[]byte` or file) with overwrites.
    +- Read with recursion values.
    +- Read with parent-child sections.
    +- Read with auto-increment key names.
    +- Read with multiple-line values.
    +- Read with tons of helper methods.
    +- Read and convert values to Go types.
    +- Read and **WRITE** comments of sections and keys.
    +- Manipulate sections, keys and comments with ease.
    +- Keep sections and keys in order as you parse and save.
    +
    +## Installation
    +
    +To use a tagged revision:
    +
    +	go get gopkg.in/ini.v1
    +
    +To use with latest changes:
    +
    +	go get github.com/go-ini/ini
    +
    +Please add `-u` flag to update in the future.
    +
    +### Testing
    +
    +If you want to test on your machine, please apply `-t` flag:
    +
    +	go get -t gopkg.in/ini.v1
    +
    +Please add `-u` flag to update in the future.
    +
    +## Getting Started
    +
    +### Loading from data sources
    +
    +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many data sources as you want**. Passing other types will simply return an error.
    +
    +```go
    +cfg, err := ini.Load([]byte("raw data"), "filename")
    +```
    +
    +Or start with an empty object:
    +
    +```go
    +cfg := ini.Empty()
    +```
    +
    +When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
    +
    +```go
    +err := cfg.Append("other file", []byte("other raw data"))
    +```
    +
    +If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
    +
    +```go
    +cfg, err := ini.LooseLoad("filename", "filename_404")
    +```
    +
    +The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual.
    +
    +#### Ignore cases of key name
    +
    +When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
    +
    +```go
    +cfg, err := ini.InsensitiveLoad("filename")
    +//...
    +
    +// sec1 and sec2 are exactly the same section object
    +sec1, err := cfg.GetSection("Section")
    +sec2, err := cfg.GetSection("SecTIOn")
    +
    +// key1 and key2 are exactly the same key object
    +key1, err := cfg.GetKey("Key")
    +key2, err := cfg.GetKey("KeY")
    +```
    +
    +#### MySQL-like boolean key 
    +
    +MySQL's configuration allows a key without value as follows:
    +
    +```ini
    +[mysqld]
    +...
    +skip-host-cache
    +skip-name-resolve
    +```
    +
    +By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options:
    +
    +```go
    +cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
    +```
    +
    +The value of those keys is always `true`, and when you save to a file, it will be kept in the same format as you read.
    +
    +### Working with sections
    +
    +To get a section, you would need to:
    +
    +```go
    +section, err := cfg.GetSection("section name")
    +```
    +
    +For a shortcut for default section, just give an empty string as name:
    +
    +```go
    +section, err := cfg.GetSection("")
    +```
    +
    +When you're pretty sure the section exists, following code could make your life easier:
    +
    +```go
    +section := cfg.Section("")
    +```
    +
    +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
    +
    +To create a new section:
    +
    +```go
    +err := cfg.NewSection("new section")
    +```
    +
    +To get a list of sections or section names:
    +
    +```go
    +sections := cfg.Sections()
    +names := cfg.SectionStrings()
    +```
    +
    +### Working with keys
    +
    +To get a key under a section:
    +
    +```go
    +key, err := cfg.Section("").GetKey("key name")
    +```
    +
    +Same rule applies to key operations:
    +
    +```go
    +key := cfg.Section("").Key("key name")
    +```
    +
    +To check if a key exists:
    +
    +```go
    +yes := cfg.Section("").HasKey("key name")
    +```
    +
    +To create a new key:
    +
    +```go
    +err := cfg.Section("").NewKey("name", "value")
    +```
    +
    +To get a list of keys or key names:
    +
    +```go
    +keys := cfg.Section("").Keys()
    +names := cfg.Section("").KeyStrings()
    +```
    +
    +To get a clone hash of keys and corresponding values:
    +
    +```go
    +hash := cfg.Section("").KeysHash()
    +```
    +
    +### Working with values
    +
    +To get a string value:
    +
    +```go
    +val := cfg.Section("").Key("key name").String()
    +```
    +
    +To validate key value on the fly:
    +
    +```go
    +val := cfg.Section("").Key("key name").Validate(func(in string) string {
    +	if len(in) == 0 {
    +		return "default"
    +	}
    +	return in
    +})
    +```
    +
    +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance):
    +
    +```go
    +val := cfg.Section("").Key("key name").Value()
    +```
    +
    +To check if raw value exists:
    +
    +```go
    +yes := cfg.Section("").HasValue("test value")
    +```
    +
    +To get value with types:
    +
    +```go
    +// For boolean values:
    +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
    +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
    +v, err = cfg.Section("").Key("BOOL").Bool()
    +v, err = cfg.Section("").Key("FLOAT64").Float64()
    +v, err = cfg.Section("").Key("INT").Int()
    +v, err = cfg.Section("").Key("INT64").Int64()
    +v, err = cfg.Section("").Key("UINT").Uint()
    +v, err = cfg.Section("").Key("UINT64").Uint64()
    +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
    +v, err = cfg.Section("").Key("TIME").Time() // RFC3339
    +
    +v = cfg.Section("").Key("BOOL").MustBool()
    +v = cfg.Section("").Key("FLOAT64").MustFloat64()
    +v = cfg.Section("").Key("INT").MustInt()
    +v = cfg.Section("").Key("INT64").MustInt64()
    +v = cfg.Section("").Key("UINT").MustUint()
    +v = cfg.Section("").Key("UINT64").MustUint64()
    +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
    +v = cfg.Section("").Key("TIME").MustTime() // RFC3339
    +
    +// Methods start with Must also accept one argument for default value
    +// when key not found or fail to parse value to given type.
    +// Except method MustString, which you have to pass a default value.
    +
    +v = cfg.Section("").Key("String").MustString("default")
    +v = cfg.Section("").Key("BOOL").MustBool(true)
    +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
    +v = cfg.Section("").Key("INT").MustInt(10)
    +v = cfg.Section("").Key("INT64").MustInt64(99)
    +v = cfg.Section("").Key("UINT").MustUint(3)
    +v = cfg.Section("").Key("UINT64").MustUint64(6)
    +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
    +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
    +```
    +
    +What if my value is three-line long?
    +
    +```ini
    +[advance]
    +ADDRESS = """404 road,
    +NotFound, State, 5000
    +Earth"""
    +```
    +
    +Not a problem!
    +
    +```go
    +cfg.Section("advance").Key("ADDRESS").String()
    +
    +/* --- start ---
    +404 road,
    +NotFound, State, 5000
    +Earth
    +------  end  --- */
    +```
    +
    +That's cool, how about continuation lines?
    +
    +```ini
    +[advance]
    +two_lines = how about \
    +	continuation lines?
    +lots_of_lines = 1 \
    +	2 \
    +	3 \
    +	4
    +```
    +
    +Piece of cake!
    +
    +```go
    +cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
    +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
    +```
    +
    +Well, I hate continuation lines, how do I disable that?
    +
    +```go
    +cfg, err := ini.LoadSources(ini.LoadOptions{
    +	IgnoreContinuation: true,
    +}, "filename")
    +```
    +
    +Holy crap! 
    +
    +Note that single quotes around values will be stripped:
    +
    +```ini
    +foo = "some value" // foo: some value
    +bar = 'some value' // bar: some value
    +```
    +
    +That's all? Hmm, no.
    +
    +#### Helper methods of working with values
    +
    +To get value with given candidates:
    +
    +```go
    +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
    +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
    +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
    +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
    +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
    +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
    +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
    +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
    +```
    +
    +The default value will be returned if the value of the key is not in the candidates you gave, and the default value does not need to be one of the candidates.
    +
    +To validate value in a given range:
    +
    +```go
    +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
    +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
    +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
    +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
    +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
    +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
    +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
    +```
    +
    +##### Auto-split values into a slice
    +
    +To use zero value of type for invalid inputs:
    +
    +```go
    +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
    +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
    +vals = cfg.Section("").Key("STRINGS").Strings(",")
    +vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
    +vals = cfg.Section("").Key("INTS").Ints(",")
    +vals = cfg.Section("").Key("INT64S").Int64s(",")
    +vals = cfg.Section("").Key("UINTS").Uints(",")
    +vals = cfg.Section("").Key("UINT64S").Uint64s(",")
    +vals = cfg.Section("").Key("TIMES").Times(",")
    +```
    +
    +To exclude invalid values out of result slice:
    +
    +```go
    +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
    +// Input: how, 2.2, are, you -> [2.2]
    +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
    +vals = cfg.Section("").Key("INTS").ValidInts(",")
    +vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
    +vals = cfg.Section("").Key("UINTS").ValidUints(",")
    +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
    +vals = cfg.Section("").Key("TIMES").ValidTimes(",")
    +```
    +
    +Or to return nothing but error when have invalid inputs:
    +
    +```go
    +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
    +// Input: how, 2.2, are, you -> error
    +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
    +vals = cfg.Section("").Key("INTS").StrictInts(",")
    +vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
    +vals = cfg.Section("").Key("UINTS").StrictUints(",")
    +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
    +vals = cfg.Section("").Key("TIMES").StrictTimes(",")
    +```
    +
    +### Save your configuration
    +
    +Finally, it's time to save your configuration to somewhere.
    +
    +A typical way to save configuration is writing it to a file:
    +
    +```go
    +// ...
    +err = cfg.SaveTo("my.ini")
    +err = cfg.SaveToIndent("my.ini", "\t")
    +```
    +
    +Another way to save is writing to a `io.Writer` interface:
    +
    +```go
    +// ...
    +cfg.WriteTo(writer)
    +cfg.WriteToIndent(writer, "\t")
    +```
    +
    +## Advanced Usage
    +
    +### Recursive Values
    +
    +For all values of keys, there is a special syntax `%(NAME)s`, where `NAME` is the key name in the same section or the default section, and `%(NAME)s` will be replaced by the corresponding value (empty string if the key is not found). You can use this syntax at most 99 levels of recursion.
    +
    +```ini
    +NAME = ini
    +
    +[author]
    +NAME = Unknwon
    +GITHUB = https://github.com/%(NAME)s
    +
    +[package]
    +FULL_NAME = github.com/go-ini/%(NAME)s
    +```
    +
    +```go
    +cfg.Section("author").Key("GITHUB").String()		// https://github.com/Unknwon
    +cfg.Section("package").Key("FULL_NAME").String()	// github.com/go-ini/ini
    +```
    +
    +### Parent-child Sections
    +
    +You can use `.` in section names to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try again on its parent section until there is no parent section.
    +
    +```ini
    +NAME = ini
    +VERSION = v1
    +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
    +
    +[package]
    +CLONE_URL = https://%(IMPORT_PATH)s
    +
    +[package.sub]
    +```
    +
    +```go
    +cfg.Section("package.sub").Key("CLONE_URL").String()	// https://gopkg.in/ini.v1
    +```
    +
    +#### Retrieve parent keys available to a child section
    +
    +```go
    +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
    +```
    +
    +### Auto-increment Key Names
    +
    +If the key name is `-` in the data source, it is treated as special syntax for auto-increment key names starting from 1, and every section has an independent counter.
    +
    +```ini
    +[features]
    +-: Support read/write comments of keys and sections
    +-: Support auto-increment of key names
    +-: Support load multiple files to overwrite key values
    +```
    +
    +```go
    +cfg.Section("features").KeyStrings()	// []{"#1", "#2", "#3"}
    +```
    +
    +### Map To Struct
    +
    +Want more objective way to play with INI? Cool.
    +
    +```ini
    +Name = Unknwon
    +age = 21
    +Male = true
    +Born = 1993-01-01T20:17:05Z
    +
    +[Note]
    +Content = Hi is a good man!
    +Cities = HangZhou, Boston
    +```
    +
    +```go
    +type Note struct {
    +	Content string
    +	Cities  []string
    +}
    +
    +type Person struct {
    +	Name string
    +	Age  int `ini:"age"`
    +	Male bool
    +	Born time.Time
    +	Note
    +	Created time.Time `ini:"-"`
    +}
    +
    +func main() {
    +	cfg, err := ini.Load("path/to/ini")
    +	// ...
    +	p := new(Person)
    +	err = cfg.MapTo(p)
    +	// ...
    +
    +	// Things can be simpler.
    +	err = ini.MapTo(p, "path/to/ini")
    +	// ...
    +
    +	// Just map a section? Fine.
    +	n := new(Note)
    +	err = cfg.Section("Note").MapTo(n)
    +	// ...
    +}
    +```
    +
    +Can I have default value for field? Absolutely.
    +
    +Assign it before you map to the struct. It will keep the value as it is if the key is not present or has the wrong type.
    +
    +```go
    +// ...
    +p := &Person{
    +	Name: "Joe",
    +}
    +// ...
    +```
    +
    +It's really cool, but what's the point if you can't give me my file back from struct?
    +
    +### Reflect From Struct
    +
    +Why not?
    +
    +```go
    +type Embeded struct {
    +	Dates  []time.Time `delim:"|"`
    +	Places []string    `ini:"places,omitempty"`
    +	None   []int       `ini:",omitempty"`
    +}
    +
    +type Author struct {
    +	Name      string `ini:"NAME"`
    +	Male      bool
    +	Age       int
    +	GPA       float64
    +	NeverMind string `ini:"-"`
    +	*Embeded
    +}
    +
    +func main() {
    +	a := &Author{"Unknwon", true, 21, 2.8, "",
    +		&Embeded{
    +			[]time.Time{time.Now(), time.Now()},
    +			[]string{"HangZhou", "Boston"},
    +			[]int{},
    +		}}
    +	cfg := ini.Empty()
    +	err = ini.ReflectFrom(cfg, a)
    +	// ...
    +}
    +```
    +
    +So, what do I get?
    +
    +```ini
    +NAME = Unknwon
    +Male = true
    +Age = 21
    +GPA = 2.8
    +
    +[Embeded]
    +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
    +places = HangZhou,Boston
    +```
    +
    +#### Name Mapper
    +
    +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name.
    +
    +There are 2 built-in name mappers:
    +
    +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key.
    +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key.
    +
    +To use them:
    +
    +```go
    +type Info struct {
    +	PackageName string
    +}
    +
    +func main() {
    +	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
    +	// ...
    +
    +	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
    +	// ...
    +	info := new(Info)
    +	cfg.NameMapper = ini.AllCapsUnderscore
    +	err = cfg.MapTo(info)
    +	// ...
    +}
    +```
    +
    +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function.
    +
    +#### Value Mapper
    +
    +To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
    +
    +```go
    +type Env struct {
    +	Foo string `ini:"foo"`
    +}
    +
    +func main() {
    +	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n")
    +	cfg.ValueMapper = os.ExpandEnv
    +	// ...
    +	env := &Env{}
    +	err = cfg.Section("env").MapTo(env)
    +}
    +```
    +
    +This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
    +
    +#### Other Notes On Map/Reflect
    +
    +Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
    +
    +```go
    +type Child struct {
    +	Age string
    +}
    +
    +type Parent struct {
    +	Name string
    +	Child
    +}
    +
    +type Config struct {
    +	City string
    +	Parent
    +}
    +```
    +
    +Example configuration:
    +
    +```ini
    +City = Boston
    +
    +[Parent]
    +Name = Unknwon
    +
    +[Child]
    +Age = 21
    +```
    +
    +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome.
    +
    +```go
    +type Child struct {
    +	Age string
    +}
    +
    +type Parent struct {
    +	Name string
    +	Child `ini:"Parent"`
    +}
    +
    +type Config struct {
    +	City string
    +	Parent
    +}
    +```
    +
    +Example configuration:
    +
    +```ini
    +City = Boston
    +
    +[Parent]
    +Name = Unknwon
    +Age = 21
    +```
    +
    +## Getting Help
    +
    +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
    +- [File An Issue](https://github.com/go-ini/ini/issues/new)
    +
    +## FAQs
    +
    +### What does `BlockMode` field do?
    +
    +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster.
    +
    +### Why another INI library?
    +
    +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
    +
    +To make those changes I had to break the API, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time. (PS: shorter import path)
    +
    +## License
    +
    +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/README_ZH.md b/src/prometheus/vendor/github.com/go-ini/ini/README_ZH.md
    new file mode 100644
    index 0000000..2178e47
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/README_ZH.md
    @@ -0,0 +1,690 @@
    +本包提供了 Go 语言中读写 INI 文件的功能。
    +
    +## 功能特性
    +
    +- 支持覆盖加载多个数据源(`[]byte` 或文件)
    +- 支持递归读取键值
    +- 支持读取父子分区
    +- 支持读取自增键名
    +- 支持读取多行的键值
    +- 支持大量辅助方法
    +- 支持在读取时直接转换为 Go 语言类型
    +- 支持读取和 **写入** 分区和键的注释
    +- 轻松操作分区、键值和注释
    +- 在保存文件时分区和键值会保持原有的顺序
    +
    +## 下载安装
    +
    +使用一个特定版本:
    +
    +    go get gopkg.in/ini.v1
    +
    +使用最新版:
    +
    +	go get github.com/go-ini/ini
    +
    +如需更新请添加 `-u` 选项。
    +
    +### 测试安装
    +
    +如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
    +
    +	go get -t gopkg.in/ini.v1
    +
    +如需更新请添加 `-u` 选项。
    +
    +## 开始使用
    +
    +### 从数据源加载
    +
    +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
    +
    +```go
    +cfg, err := ini.Load([]byte("raw data"), "filename")
    +```
    +
    +或者从一个空白的文件开始:
    +
    +```go
    +cfg := ini.Empty()
    +```
    +
    +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
    +
    +```go
    +err := cfg.Append("other file", []byte("other raw data"))
    +```
    +
    +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
    +
    +```go
    +cfg, err := ini.LooseLoad("filename", "filename_404")
    +```
    +
    +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
    +
    +#### 忽略键名的大小写
    +
    +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
    +
    +```go
    +cfg, err := ini.InsensitiveLoad("filename")
    +//...
    +
    +// sec1 和 sec2 指向同一个分区对象
    +sec1, err := cfg.GetSection("Section")
    +sec2, err := cfg.GetSection("SecTIOn")
    +
    +// key1 和 key2 指向同一个键对象
    +key1, err := cfg.GetKey("Key")
    +key2, err := cfg.GetKey("KeY")
    +```
    +
    +#### 类似 MySQL 配置中的布尔值键
    +
    +MySQL 的配置文件中会出现没有具体值的布尔类型的键:
    +
    +```ini
    +[mysqld]
    +...
    +skip-host-cache
    +skip-name-resolve
    +```
    +
    +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
    +
    +```go
    +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
    +```
    +
    +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
    +
    +### 操作分区(Section)
    +
    +获取指定分区:
    +
    +```go
    +section, err := cfg.GetSection("section name")
    +```
    +
    +如果您想要获取默认分区,则可以用空字符串代替分区名:
    +
    +```go
    +section, err := cfg.GetSection("")
    +```
    +
    +当您非常确定某个分区是存在的,可以使用以下简便方法:
    +
    +```go
    +section := cfg.Section("")
    +```
    +
    +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
    +
    +创建一个分区:
    +
    +```go
    +err := cfg.NewSection("new section")
    +```
    +
    +获取所有分区对象或名称:
    +
    +```go
    +sections := cfg.Sections()
    +names := cfg.SectionStrings()
    +```
    +
    +### 操作键(Key)
    +
    +获取某个分区下的键:
    +
    +```go
    +key, err := cfg.Section("").GetKey("key name")
    +```
    +
    +和分区一样,您也可以直接获取键而忽略错误处理:
    +
    +```go
    +key := cfg.Section("").Key("key name")
    +```
    +
    +判断某个键是否存在:
    +
    +```go
    +yes := cfg.Section("").HasKey("key name")
    +```
    +
    +创建一个新的键:
    +
    +```go
    +err := cfg.Section("").NewKey("name", "value")
    +```
    +
    +获取分区下的所有键或键名:
    +
    +```go
    +keys := cfg.Section("").Keys()
    +names := cfg.Section("").KeyStrings()
    +```
    +
    +获取分区下的所有键值对的克隆:
    +
    +```go
    +hash := cfg.Section("").KeysHash()
    +```
    +
    +### 操作键值(Value)
    +
    +获取一个类型为字符串(string)的值:
    +
    +```go
    +val := cfg.Section("").Key("key name").String()
    +```
    +
    +获取值的同时通过自定义函数进行处理验证:
    +
    +```go
    +val := cfg.Section("").Key("key name").Validate(func(in string) string {
    +	if len(in) == 0 {
    +		return "default"
    +	}
    +	return in
    +})
    +```
    +
    +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
    +
    +```go
    +val := cfg.Section("").Key("key name").Value()
    +```
    +
    +判断某个原值是否存在:
    +
    +```go
    +yes := cfg.Section("").HasValue("test value")
    +```
    +
    +获取其它类型的值:
    +
    +```go
    +// 布尔值的规则:
    +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
    +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
    +v, err = cfg.Section("").Key("BOOL").Bool()
    +v, err = cfg.Section("").Key("FLOAT64").Float64()
    +v, err = cfg.Section("").Key("INT").Int()
    +v, err = cfg.Section("").Key("INT64").Int64()
    +v, err = cfg.Section("").Key("UINT").Uint()
    +v, err = cfg.Section("").Key("UINT64").Uint64()
    +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
    +v, err = cfg.Section("").Key("TIME").Time() // RFC3339
    +
    +v = cfg.Section("").Key("BOOL").MustBool()
    +v = cfg.Section("").Key("FLOAT64").MustFloat64()
    +v = cfg.Section("").Key("INT").MustInt()
    +v = cfg.Section("").Key("INT64").MustInt64()
    +v = cfg.Section("").Key("UINT").MustUint()
    +v = cfg.Section("").Key("UINT64").MustUint64()
    +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
    +v = cfg.Section("").Key("TIME").MustTime() // RFC3339
    +
    +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
    +// 当键不存在或者转换失败时,则会直接返回该默认值。
    +// 但是,MustString 方法必须传递一个默认值。
    +
    +v = cfg.Section("").Key("String").MustString("default")
    +v = cfg.Section("").Key("BOOL").MustBool(true)
    +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
    +v = cfg.Section("").Key("INT").MustInt(10)
    +v = cfg.Section("").Key("INT64").MustInt64(99)
    +v = cfg.Section("").Key("UINT").MustUint(3)
    +v = cfg.Section("").Key("UINT64").MustUint64(6)
    +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
    +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
    +```
    +
    +如果我的值有好多行怎么办?
    +
    +```ini
    +[advance]
    +ADDRESS = """404 road,
    +NotFound, State, 5000
    +Earth"""
    +```
    +
    +嗯哼?小 case!
    +
    +```go
    +cfg.Section("advance").Key("ADDRESS").String()
    +
    +/* --- start ---
    +404 road,
    +NotFound, State, 5000
    +Earth
    +------  end  --- */
    +```
    +
    +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
    +
    +```ini
    +[advance]
    +two_lines = how about \
    +	continuation lines?
    +lots_of_lines = 1 \
    +	2 \
    +	3 \
    +	4
    +```
    +
    +简直是小菜一碟!
    +
    +```go
    +cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
    +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
    +```
    +
    +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
    +
    +```go
    +cfg, err := ini.LoadSources(ini.LoadOptions{
    +	IgnoreContinuation: true,
    +}, "filename")
    +```
    +
    +哇靠给力啊!
    +
    +需要注意的是,值两侧的单引号会被自动剔除:
    +
    +```ini
    +foo = "some value" // foo: some value
    +bar = 'some value' // bar: some value
    +```
    +
    +这就是全部了?哈哈,当然不是。
    +
    +#### 操作键值的辅助方法
    +
    +获取键值时设定候选值:
    +
    +```go
    +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
    +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
    +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
    +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
    +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
    +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
    +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
    +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
    +```
    +
    +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
    +
    +验证获取的值是否在指定范围内:
    +
    +```go
    +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
    +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
    +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
    +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
    +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
    +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
    +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
    +```
    +
    +##### 自动分割键值到切片(slice)
    +
    +当存在无效输入时,使用零值代替:
    +
    +```go
    +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
    +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
    +vals = cfg.Section("").Key("STRINGS").Strings(",")
    +vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
    +vals = cfg.Section("").Key("INTS").Ints(",")
    +vals = cfg.Section("").Key("INT64S").Int64s(",")
    +vals = cfg.Section("").Key("UINTS").Uints(",")
    +vals = cfg.Section("").Key("UINT64S").Uint64s(",")
    +vals = cfg.Section("").Key("TIMES").Times(",")
    +```
    +
    +从结果切片中剔除无效输入:
    +
    +```go
    +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
    +// Input: how, 2.2, are, you -> [2.2]
    +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
    +vals = cfg.Section("").Key("INTS").ValidInts(",")
    +vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
    +vals = cfg.Section("").Key("UINTS").ValidUints(",")
    +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
    +vals = cfg.Section("").Key("TIMES").ValidTimes(",")
    +```
    +
    +当存在无效输入时,直接返回错误:
    +
    +```go
    +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
    +// Input: how, 2.2, are, you -> error
    +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
    +vals = cfg.Section("").Key("INTS").StrictInts(",")
    +vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
    +vals = cfg.Section("").Key("UINTS").StrictUints(",")
    +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
    +vals = cfg.Section("").Key("TIMES").StrictTimes(",")
    +```
    +
    +### 保存配置
    +
    +终于到了这个时刻,是时候保存一下配置了。
    +
    +比较原始的做法是输出配置到某个文件:
    +
    +```go
    +// ...
    +err = cfg.SaveTo("my.ini")
    +err = cfg.SaveToIndent("my.ini", "\t")
    +```
    +
    +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
    +
    +```go
    +// ...
    +cfg.WriteTo(writer)
    +cfg.WriteToIndent(writer, "\t")
    +```
    +
    +### 高级用法
    +
    +#### 递归读取键值
    +
    +在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
    +
    +```ini
    +NAME = ini
    +
    +[author]
    +NAME = Unknwon
    +GITHUB = https://github.com/%(NAME)s
    +
    +[package]
    +FULL_NAME = github.com/go-ini/%(NAME)s
    +```
    +
    +```go
    +cfg.Section("author").Key("GITHUB").String()		// https://github.com/Unknwon
    +cfg.Section("package").Key("FULL_NAME").String()	// github.com/go-ini/ini
    +```
    +
    +#### 读取父子分区
    +
    +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
    +
    +```ini
    +NAME = ini
    +VERSION = v1
    +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
    +
    +[package]
    +CLONE_URL = https://%(IMPORT_PATH)s
    +
    +[package.sub]
    +```
    +
    +```go
    +cfg.Section("package.sub").Key("CLONE_URL").String()	// https://gopkg.in/ini.v1
    +```
    +
    +#### 获取上级父分区下的所有键名
    +
    +```go
    +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
    +```
    +
    +#### 读取自增键名
    +
    +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
    +
    +```ini
    +[features]
    +-: Support read/write comments of keys and sections
    +-: Support auto-increment of key names
    +-: Support load multiple files to overwrite key values
    +```
    +
    +```go
    +cfg.Section("features").KeyStrings()	// []{"#1", "#2", "#3"}
    +```
    +
    +### 映射到结构
    +
    +想要使用更加面向对象的方式玩转 INI 吗?好主意。
    +
    +```ini
    +Name = Unknwon
    +age = 21
    +Male = true
    +Born = 1993-01-01T20:17:05Z
    +
    +[Note]
    +Content = Hi is a good man!
    +Cities = HangZhou, Boston
    +```
    +
    +```go
    +type Note struct {
    +	Content string
    +	Cities  []string
    +}
    +
    +type Person struct {
    +	Name string
    +	Age  int `ini:"age"`
    +	Male bool
    +	Born time.Time
    +	Note
    +	Created time.Time `ini:"-"`
    +}
    +
    +func main() {
    +	cfg, err := ini.Load("path/to/ini")
    +	// ...
    +	p := new(Person)
    +	err = cfg.MapTo(p)
    +	// ...
    +
    +	// 一切竟可以如此的简单。
    +	err = ini.MapTo(p, "path/to/ini")
    +	// ...
    +
    +	// 嗯哼?只需要映射一个分区吗?
    +	n := new(Note)
    +	err = cfg.Section("Note").MapTo(n)
    +	// ...
    +}
    +```
    +
    +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
    +
    +```go
    +// ...
    +p := &Person{
    +	Name: "Joe",
    +}
    +// ...
    +```
    +
    +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
    +
    +### 从结构反射
    +
    +可是,我有说不能吗?
    +
    +```go
    +type Embeded struct {
    +	Dates  []time.Time `delim:"|"`
    +	Places []string    `ini:"places,omitempty"`
    +	None   []int       `ini:",omitempty"`
    +}
    +
    +type Author struct {
    +	Name      string `ini:"NAME"`
    +	Male      bool
    +	Age       int
    +	GPA       float64
    +	NeverMind string `ini:"-"`
    +	*Embeded
    +}
    +
    +func main() {
    +	a := &Author{"Unknwon", true, 21, 2.8, "",
    +		&Embeded{
    +			[]time.Time{time.Now(), time.Now()},
    +			[]string{"HangZhou", "Boston"},
    +			[]int{},
    +		}}
    +	cfg := ini.Empty()
    +	err = ini.ReflectFrom(cfg, a)
    +	// ...
    +}
    +```
    +
    +瞧瞧,奇迹发生了。
    +
    +```ini
    +NAME = Unknwon
    +Male = true
    +Age = 21
    +GPA = 2.8
    +
    +[Embeded]
    +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
    +places = HangZhou,Boston
    +```
    +
    +#### 名称映射器(Name Mapper)
    +
    +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
    +
    +目前有 2 款内置的映射器:
    +
    +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
    +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
    +
    +使用方法:
    +
    +```go
    +type Info struct{
    +	PackageName string
    +}
    +
    +func main() {
    +	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
    +	// ...
    +
    +	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
    +	// ...
    +	info := new(Info)
    +	cfg.NameMapper = ini.AllCapsUnderscore
    +	err = cfg.MapTo(info)
    +	// ...
    +}
    +```
    +
    +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
    +
    +#### 值映射器(Value Mapper)
    +
    +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量:
    +
    +```go
    +type Env struct {
    +	Foo string `ini:"foo"`
    +}
    +
    +func main() {
    +	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n")
    +	cfg.ValueMapper = os.ExpandEnv
    +	// ...
    +	env := &Env{}
    +	err = cfg.Section("env").MapTo(env)
    +}
    +```
    +
    +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。
    +
    +#### 映射/反射的其它说明
    +
    +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
    +
    +```go
    +type Child struct {
    +	Age string
    +}
    +
    +type Parent struct {
    +	Name string
    +	Child
    +}
    +
    +type Config struct {
    +	City string
    +	Parent
    +}
    +```
    +
    +示例配置文件:
    +
    +```ini
    +City = Boston
    +
    +[Parent]
    +Name = Unknwon
    +
    +[Child]
    +Age = 21
    +```
    +
    +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
    +
    +```go
    +type Child struct {
    +	Age string
    +}
    +
    +type Parent struct {
    +	Name string
    +	Child `ini:"Parent"`
    +}
    +
    +type Config struct {
    +	City string
    +	Parent
    +}
    +```
    +
    +示例配置文件:
    +
    +```ini
    +City = Boston
    +
    +[Parent]
    +Name = Unknwon
    +Age = 21
    +```
    +
    +## 获取帮助
    +
    +- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
    +- [创建工单](https://github.com/go-ini/ini/issues/new)
    +
    +## 常见问题
    +
    +### 字段 `BlockMode` 是什么?
    +
    +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据安全。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
    +
    +### 为什么要写另一个 INI 解析库?
    +
    +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
    +
    +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/error.go b/src/prometheus/vendor/github.com/go-ini/ini/error.go
    new file mode 100644
    index 0000000..80afe74
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/error.go
    @@ -0,0 +1,32 @@
    +// Copyright 2016 Unknwon
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License"): you may
    +// not use this file except in compliance with the License. You may obtain
    +// a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +// License for the specific language governing permissions and limitations
    +// under the License.
    +
    +package ini
    +
    +import (
    +	"fmt"
    +)
    +
    +type ErrDelimiterNotFound struct {
    +	Line string
    +}
    +
    +func IsErrDelimiterNotFound(err error) bool {
    +	_, ok := err.(ErrDelimiterNotFound)
    +	return ok
    +}
    +
    +func (err ErrDelimiterNotFound) Error() string {
    +	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
    +}
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/ini.go b/src/prometheus/vendor/github.com/go-ini/ini/ini.go
    new file mode 100644
    index 0000000..cd065e7
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/ini.go
    @@ -0,0 +1,501 @@
    +// Copyright 2014 Unknwon
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License"): you may
    +// not use this file except in compliance with the License. You may obtain
    +// a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +// License for the specific language governing permissions and limitations
    +// under the License.
    +
    +// Package ini provides INI file read and write functionality in Go.
    +package ini
    +
    +import (
    +	"bytes"
    +	"errors"
    +	"fmt"
    +	"io"
    +	"os"
    +	"regexp"
    +	"runtime"
    +	"strconv"
    +	"strings"
    +	"sync"
    +	"time"
    +)
    +
    +const (
    +	// Name for default section. You can use this constant or the string literal.
    +	// In most of cases, an empty string is all you need to access the section.
    +	DEFAULT_SECTION = "DEFAULT"
    +
    +	// Maximum allowed depth when recursively substituting variable names.
    +	_DEPTH_VALUES = 99
    +	_VERSION      = "1.21.1"
    +)
    +
    +// Version returns current package version literal.
    +func Version() string {
    +	return _VERSION
    +}
    +
    +var (
    +	// Delimiter to determine or compose a new line.
    +	// This variable will be changed to "\r\n" automatically on Windows
    +	// at package init time.
    +	LineBreak = "\n"
    +
    +	// Variable regexp pattern: %(variable)s
    +	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
    +
    +	// Indicate whether to align "=" sign with spaces to produce pretty output
    +	// or reduce all possible spaces for compact format.
    +	PrettyFormat = true
    +
    +	// Explicitly write DEFAULT section header
    +	DefaultHeader = false
    +)
    +
    +func init() {
    +	if runtime.GOOS == "windows" {
    +		LineBreak = "\r\n"
    +	}
    +}
    +
    +func inSlice(str string, s []string) bool {
    +	for _, v := range s {
    +		if str == v {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// dataSource is an interface that returns object which can be read and closed.
    +type dataSource interface {
    +	ReadCloser() (io.ReadCloser, error)
    +}
    +
    +// sourceFile represents an object that contains content on the local file system.
    +type sourceFile struct {
    +	name string
    +}
    +
    +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
    +	return os.Open(s.name)
    +}
    +
    +type bytesReadCloser struct {
    +	reader io.Reader
    +}
    +
    +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
    +	return rc.reader.Read(p)
    +}
    +
    +func (rc *bytesReadCloser) Close() error {
    +	return nil
    +}
    +
    +// sourceData represents an object that contains content in memory.
    +type sourceData struct {
    +	data []byte
    +}
    +
    +func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
    +	return &bytesReadCloser{bytes.NewReader(s.data)}, nil
    +}
    +
    +// File represents a combination of one or more INI file(s) in memory.
    +type File struct {
    +	// Should make things safe, but sometimes doesn't matter.
    +	BlockMode bool
    +	// Make sure data is safe in multiple goroutines.
    +	lock sync.RWMutex
    +
    +	// Allow combination of multiple data sources.
    +	dataSources []dataSource
    +	// Actual data is stored here.
    +	sections map[string]*Section
    +
    +	// To keep data in order.
    +	sectionList []string
    +
    +	options LoadOptions
    +
    +	NameMapper
    +	ValueMapper
    +}
    +
    +// newFile initializes File object with given data sources.
    +func newFile(dataSources []dataSource, opts LoadOptions) *File {
    +	return &File{
    +		BlockMode:   true,
    +		dataSources: dataSources,
    +		sections:    make(map[string]*Section),
    +		sectionList: make([]string, 0, 10),
    +		options:     opts,
    +	}
    +}
    +
    +func parseDataSource(source interface{}) (dataSource, error) {
    +	switch s := source.(type) {
    +	case string:
    +		return sourceFile{s}, nil
    +	case []byte:
    +		return &sourceData{s}, nil
    +	default:
    +		return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
    +	}
    +}
    +
    +type LoadOptions struct {
    +	// Loose indicates whether the parser should ignore nonexistent files or return error.
    +	Loose bool
    +	// Insensitive indicates whether the parser forces all section and key names to lowercase.
    +	Insensitive bool
    +	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
    +	IgnoreContinuation bool
    +	// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
    +	// This type of keys are mostly used in my.cnf.
    +	AllowBooleanKeys bool
    +}
    +
    +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
    +	sources := make([]dataSource, len(others)+1)
    +	sources[0], err = parseDataSource(source)
    +	if err != nil {
    +		return nil, err
    +	}
    +	for i := range others {
    +		sources[i+1], err = parseDataSource(others[i])
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +	f := newFile(sources, opts)
    +	if err = f.Reload(); err != nil {
    +		return nil, err
    +	}
    +	return f, nil
    +}
    +
    +// Load loads and parses from INI data sources.
    +// Arguments can be mixed of file name with string type, or raw data in []byte.
    +// It will return error if list contains nonexistent files.
    +func Load(source interface{}, others ...interface{}) (*File, error) {
    +	return LoadSources(LoadOptions{}, source, others...)
    +}
    +
    +// LooseLoad has exactly same functionality as Load function
    +// except it ignores nonexistent files instead of returning error.
    +func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
    +	return LoadSources(LoadOptions{Loose: true}, source, others...)
    +}
    +
    +// InsensitiveLoad has exactly same functionality as Load function
    +// except it forces all section and key names to be lowercased.
    +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
    +	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
    +}
    +
    +// Empty returns an empty file object.
    +func Empty() *File {
    +	// Ignore error here, we sure our data is good.
    +	f, _ := Load([]byte(""))
    +	return f
    +}
    +
    +// NewSection creates a new section.
    +func (f *File) NewSection(name string) (*Section, error) {
    +	if len(name) == 0 {
    +		return nil, errors.New("error creating new section: empty section name")
    +	} else if f.options.Insensitive && name != DEFAULT_SECTION {
    +		name = strings.ToLower(name)
    +	}
    +
    +	if f.BlockMode {
    +		f.lock.Lock()
    +		defer f.lock.Unlock()
    +	}
    +
    +	if inSlice(name, f.sectionList) {
    +		return f.sections[name], nil
    +	}
    +
    +	f.sectionList = append(f.sectionList, name)
    +	f.sections[name] = newSection(f, name)
    +	return f.sections[name], nil
    +}
    +
    +// NewSections creates a list of sections.
    +func (f *File) NewSections(names ...string) (err error) {
    +	for _, name := range names {
    +		if _, err = f.NewSection(name); err != nil {
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +// GetSection returns section by given name.
    +// An empty name refers to the default section; in case-insensitive mode
    +// the lookup uses the lower-cased name.
    +func (f *File) GetSection(name string) (*Section, error) {
    +	if len(name) == 0 {
    +		name = DEFAULT_SECTION
    +	} else if f.options.Insensitive {
    +		name = strings.ToLower(name)
    +	}
    +
    +	if f.BlockMode {
    +		f.lock.RLock()
    +		defer f.lock.RUnlock()
    +	}
    +
    +	sec := f.sections[name]
    +	if sec == nil {
    +		return nil, fmt.Errorf("section '%s' does not exist", name)
    +	}
    +	return sec, nil
    +}
    +
    +// Section assumes named section exists and creates it when not,
    +// so the caller always gets a usable *Section back.
    +func (f *File) Section(name string) *Section {
    +	sec, err := f.GetSection(name)
    +	if err != nil {
    +		// Note: It's OK here because the only possible error is empty section name,
    +		// but if it's empty, this piece of code won't be executed.
    +		sec, _ = f.NewSection(name)
    +		return sec
    +	}
    +	return sec
    +}
    +
    +// Sections returns the list of all sections, in declaration order.
    +func (f *File) Sections() []*Section {
    +	sections := make([]*Section, len(f.sectionList))
    +	for i := range f.sectionList {
    +		sections[i] = f.Section(f.sectionList[i])
    +	}
    +	return sections
    +}
    +
    +// SectionStrings returns list of section names.
    +// The returned slice is a copy and is safe for the caller to modify.
    +func (f *File) SectionStrings() []string {
    +	list := make([]string, len(f.sectionList))
    +	copy(list, f.sectionList)
    +	return list
    +}
    +
    +// DeleteSection deletes a section. Deleting a nonexistent section is a no-op.
    +// NOTE(review): unlike GetSection/NewSection, the name is not lower-cased when
    +// options.Insensitive is set, so such sections may not be found here — confirm.
    +func (f *File) DeleteSection(name string) {
    +	if f.BlockMode {
    +		f.lock.Lock()
    +		defer f.lock.Unlock()
    +	}
    +
    +	// An empty name refers to the default section.
    +	if len(name) == 0 {
    +		name = DEFAULT_SECTION
    +	}
    +
    +	for i, s := range f.sectionList {
    +		if s == name {
    +			// Remove from both the ordered name list and the lookup map.
    +			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
    +			delete(f.sections, name)
    +			return
    +		}
    +	}
    +}
    +
    +// reload reads and parses a single data source, closing it when done.
    +func (f *File) reload(s dataSource) error {
    +	r, err := s.ReadCloser()
    +	if err != nil {
    +		return err
    +	}
    +	defer r.Close()
    +
    +	return f.parse(r)
    +}
    +
    +// Reload reloads and parses all data sources.
    +func (f *File) Reload() (err error) {
    +	for _, s := range f.dataSources {
    +		if err = f.reload(s); err != nil {
    +			// In loose mode, we create an empty default section for nonexistent files.
    +			// Parsing an empty buffer cannot realistically fail, so its error is dropped.
    +			if os.IsNotExist(err) && f.options.Loose {
    +				f.parse(bytes.NewBuffer(nil))
    +				continue
    +			}
    +			return err
    +		}
    +	}
    +	return nil
    +}
    +
    +// Append appends one or more data sources and reloads automatically.
    +// All sources are registered first; a single Reload then re-parses everything.
    +func (f *File) Append(source interface{}, others ...interface{}) error {
    +	ds, err := parseDataSource(source)
    +	if err != nil {
    +		return err
    +	}
    +	f.dataSources = append(f.dataSources, ds)
    +	for _, s := range others {
    +		ds, err = parseDataSource(s)
    +		if err != nil {
    +			return err
    +		}
    +		f.dataSources = append(f.dataSources, ds)
    +	}
    +	return f.Reload()
    +}
    +
    +// WriteToIndent writes content into io.Writer with given indention.
    +// If PrettyFormat has been set to be true,
    +// it will align "=" sign with spaces under each section.
    +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
    +	equalSign := "="
    +	if PrettyFormat {
    +		equalSign = " = "
    +	}
    +
    +	// Use buffer to make sure target is safe until finish encoding.
    +	buf := bytes.NewBuffer(nil)
    +	for i, sname := range f.sectionList {
    +		sec := f.Section(sname)
    +		if len(sec.Comment) > 0 {
    +			// Prepend a comment marker if the stored comment lacks one.
    +			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
    +				sec.Comment = "; " + sec.Comment
    +			}
    +			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
    +				return 0, err
    +			}
    +		}
    +
    +		if i > 0 || DefaultHeader {
    +			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
    +				return 0, err
    +			}
    +		} else {
    +			// Write nothing if default section is empty
    +			if len(sec.keyList) == 0 {
    +				continue
    +			}
    +		}
    +
    +		// Count and generate alignment length and buffer spaces using the
    +		// longest key. Keys may be modified if they contain certain characters so
    +		// we need to take that into account in our calculation.
    +		alignLength := 0
    +		if PrettyFormat {
    +			for _, kname := range sec.keyList {
    +				keyLength := len(kname)
    +				// First case will surround key by ` and second by """
    +				if strings.ContainsAny(kname, "\"=:") {
    +					keyLength += 2
    +				} else if strings.Contains(kname, "`") {
    +					keyLength += 6
    +				}
    +
    +				if keyLength > alignLength {
    +					alignLength = keyLength
    +				}
    +			}
    +		}
    +		alignSpaces := bytes.Repeat([]byte(" "), alignLength)
    +
    +		for _, kname := range sec.keyList {
    +			key := sec.Key(kname)
    +			if len(key.Comment) > 0 {
    +				if len(indent) > 0 && sname != DEFAULT_SECTION {
    +					buf.WriteString(indent)
    +				}
    +				if key.Comment[0] != '#' && key.Comment[0] != ';' {
    +					key.Comment = "; " + key.Comment
    +				}
    +				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
    +					return 0, err
    +				}
    +			}
    +
    +			if len(indent) > 0 && sname != DEFAULT_SECTION {
    +				buf.WriteString(indent)
    +			}
    +
    +			switch {
    +			case key.isAutoIncrement:
    +				kname = "-"
    +			case strings.ContainsAny(kname, "\"=:"):
    +				kname = "`" + kname + "`"
    +			case strings.Contains(kname, "`"):
    +				kname = `"""` + kname + `"""`
    +			}
    +			if _, err = buf.WriteString(kname); err != nil {
    +				return 0, err
    +			}
    +
    +			if key.isBooleanType {
    +				// BUGFIX: a boolean (value-less) key must still terminate its
    +				// line; previously the next key was emitted on the same line.
    +				if _, err = buf.WriteString(LineBreak); err != nil {
    +					return 0, err
    +				}
    +				continue
    +			}
    +
    +			// Write out alignment spaces before "=" sign
    +			if PrettyFormat {
    +				buf.Write(alignSpaces[:alignLength-len(kname)])
    +			}
    +
    +			val := key.value
    +			// In case key value contains "\n", "`", "\"", "#" or ";"
    +			if strings.ContainsAny(val, "\n`") {
    +				val = `"""` + val + `"""`
    +			} else if strings.ContainsAny(val, "#;") {
    +				val = "`" + val + "`"
    +			}
    +			if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
    +				return 0, err
    +			}
    +		}
    +
    +		// Put a line between sections
    +		if _, err = buf.WriteString(LineBreak); err != nil {
    +			return 0, err
    +		}
    +	}
    +
    +	return buf.WriteTo(w)
    +}
    +
    +// WriteTo writes file content into io.Writer.
    +func (f *File) WriteTo(w io.Writer) (int64, error) {
    +	return f.WriteToIndent(w, "")
    +}
    +
    +// SaveToIndent writes content to file system with given value indention.
    +func (f *File) SaveToIndent(filename, indent string) error {
    +	// Note: Because we are truncating with os.Create,
    +	// 	so it's safer to save to a temporary file location and rename after done.
    +	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
    +	defer os.Remove(tmpPath)
    +
    +	fw, err := os.Create(tmpPath)
    +	if err != nil {
    +		return err
    +	}
    +
    +	if _, err = f.WriteToIndent(fw, indent); err != nil {
    +		fw.Close()
    +		return err
    +	}
    +	// NOTE(review): the error from this Close is discarded; a failed flush
    +	// to disk would go unnoticed — consider checking it.
    +	fw.Close()
    +
    +	// Remove old file and rename the new one.
    +	os.Remove(filename)
    +	return os.Rename(tmpPath, filename)
    +}
    +
    +// SaveTo writes content to file system.
    +func (f *File) SaveTo(filename string) error {
    +	return f.SaveToIndent(filename, "")
    +}
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/key.go b/src/prometheus/vendor/github.com/go-ini/ini/key.go
    new file mode 100644
    index 0000000..9738c55
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/key.go
    @@ -0,0 +1,633 @@
    +// Copyright 2014 Unknwon
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License"): you may
    +// not use this file except in compliance with the License. You may obtain
    +// a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +// License for the specific language governing permissions and limitations
    +// under the License.
    +
    +package ini
    +
    +import (
    +	"fmt"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +// Key represents a key under a section.
    +type Key struct {
    +	s               *Section // section this key belongs to
    +	name            string
    +	value           string // raw (unexpanded) value
    +	isAutoIncrement bool   // serialized as "-" instead of its name
    +	isBooleanType   bool   // key with no value part
    +
    +	Comment string // comment lines attached to this key
    +}
    +
    +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
    +type ValueMapper func(string) string
    +
    +// Name returns name of key.
    +func (k *Key) Name() string {
    +	return k.name
    +}
    +
    +// Value returns raw value of key for performance purpose.
    +// Unlike String, no ValueMapper or variable expansion is applied.
    +func (k *Key) Value() string {
    +	return k.value
    +}
    +
    +// String returns string representation of value, applying the file's
    +// ValueMapper (if any) and expanding "%(name)s" style variable references
    +// up to _DEPTH_VALUES levels deep.
    +func (k *Key) String() string {
    +	val := k.value
    +	if k.s.f.ValueMapper != nil {
    +		val = k.s.f.ValueMapper(val)
    +	}
    +	// Fast path: no "%" means no variable reference can be present.
    +	if strings.Index(val, "%") == -1 {
    +		return val
    +	}
    +
    +	for i := 0; i < _DEPTH_VALUES; i++ {
    +		vr := varPattern.FindString(val)
    +		if len(vr) == 0 {
    +			break
    +		}
    +
    +		// Take off leading '%(' and trailing ')s'.
    +		// BUGFIX: TrimPrefix/TrimSuffix instead of TrimLeft/TrimRight — the
    +		// latter treat their argument as a character *set*, so a key name
    +		// ending in 's' or ')' (e.g. "%(days)s") was over-trimmed to "day".
    +		noption := strings.TrimPrefix(vr, "%(")
    +		noption = strings.TrimSuffix(noption, ")s")
    +
    +		// Search in the same section.
    +		nk, err := k.s.GetKey(noption)
    +		if err != nil {
    +			// Search again in default section.
    +			nk, _ = k.s.f.Section("").GetKey(noption)
    +		}
    +		// BUGFIX: if the referenced key exists in neither section, stop
    +		// expanding instead of dereferencing a nil *Key (panic).
    +		if nk == nil {
    +			break
    +		}
    +
    +		// Substitute the reference by the referenced key's raw value.
    +		val = strings.Replace(val, vr, nk.value, -1)
    +	}
    +	return val
    +}
    +
    +// Validate accepts a validate function which can
    +// return modified result as key value.
    +func (k *Key) Validate(fn func(string) string) string {
    +	return fn(k.String())
    +}
    +
    +// parseBool returns the boolean value represented by the string.
    +//
    +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
    +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
    +// Any other value returns an error.
    +func parseBool(str string) (value bool, err error) {
    +	switch str {
    +	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
    +		return true, nil
    +	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
    +		return false, nil
    +	}
    +	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
    +}
    +
    +// Bool returns bool type value.
    +func (k *Key) Bool() (bool, error) {
    +	return parseBool(k.String())
    +}
    +
    +// Float64 returns float64 type value.
    +func (k *Key) Float64() (float64, error) {
    +	return strconv.ParseFloat(k.String(), 64)
    +}
    +
    +// Int returns int type value.
    +func (k *Key) Int() (int, error) {
    +	return strconv.Atoi(k.String())
    +}
    +
    +// Int64 returns int64 type value.
    +func (k *Key) Int64() (int64, error) {
    +	return strconv.ParseInt(k.String(), 10, 64)
    +}
    +
    +// Uint returns uint type value.
    +// NOTE(review): parses as 64-bit then converts, which silently truncates
    +// values above the platform's uint size on 32-bit builds.
    +func (k *Key) Uint() (uint, error) {
    +	u, e := strconv.ParseUint(k.String(), 10, 64)
    +	return uint(u), e
    +}
    +
    +// Uint64 returns uint64 type value.
    +func (k *Key) Uint64() (uint64, error) {
    +	return strconv.ParseUint(k.String(), 10, 64)
    +}
    +
    +// Duration returns time.Duration type value.
    +func (k *Key) Duration() (time.Duration, error) {
    +	return time.ParseDuration(k.String())
    +}
    +
    +// TimeFormat parses with given format and returns time.Time type value.
    +func (k *Key) TimeFormat(format string) (time.Time, error) {
    +	return time.Parse(format, k.String())
    +}
    +
    +// Time parses with RFC3339 format and returns time.Time type value.
    +func (k *Key) Time() (time.Time, error) {
    +	return k.TimeFormat(time.RFC3339)
    +}
    +
    +// MustString returns default value if key value is empty.
    +// Note that the default is also written back into the key.
    +func (k *Key) MustString(defaultVal string) string {
    +	val := k.String()
    +	if len(val) == 0 {
    +		k.value = defaultVal
    +		return defaultVal
    +	}
    +	return val
    +}
    +
    +// MustBool always returns value without error,
    +// it returns false if error occurs.
    +// When a default is supplied and parsing fails, the default is stored
    +// back into the key (the same applies to every Must* method below).
    +func (k *Key) MustBool(defaultVal ...bool) bool {
    +	val, err := k.Bool()
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = strconv.FormatBool(defaultVal[0])
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustFloat64 always returns value without error,
    +// it returns 0.0 if error occurs.
    +func (k *Key) MustFloat64(defaultVal ...float64) float64 {
    +	val, err := k.Float64()
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustInt always returns value without error,
    +// it returns 0 if error occurs.
    +func (k *Key) MustInt(defaultVal ...int) int {
    +	val, err := k.Int()
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustInt64 always returns value without error,
    +// it returns 0 if error occurs.
    +func (k *Key) MustInt64(defaultVal ...int64) int64 {
    +	val, err := k.Int64()
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = strconv.FormatInt(defaultVal[0], 10)
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustUint always returns value without error,
    +// it returns 0 if error occurs.
    +func (k *Key) MustUint(defaultVal ...uint) uint {
    +	val, err := k.Uint()
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustUint64 always returns value without error,
    +// it returns 0 if error occurs.
    +func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
    +	val, err := k.Uint64()
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = strconv.FormatUint(defaultVal[0], 10)
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustDuration always returns value without error,
    +// it returns zero value if error occurs.
    +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
    +	val, err := k.Duration()
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = defaultVal[0].String()
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustTimeFormat always parses with given format and returns value without error,
    +// it returns zero value if error occurs.
    +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
    +	val, err := k.TimeFormat(format)
    +	if len(defaultVal) > 0 && err != nil {
    +		k.value = defaultVal[0].Format(format)
    +		return defaultVal[0]
    +	}
    +	return val
    +}
    +
    +// MustTime always parses with RFC3339 format and returns value without error,
    +// it returns zero value if error occurs.
    +func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
    +	return k.MustTimeFormat(time.RFC3339, defaultVal...)
    +}
    +
    +// In always returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +// The value is returned only when it exactly matches one of the candidates.
    +func (k *Key) In(defaultVal string, candidates []string) string {
    +	val := k.String()
    +	for _, cand := range candidates {
    +		if val == cand {
    +			return val
    +		}
    +	}
    +	return defaultVal
    +}
    +
    +// InFloat64 always returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
    +	val := k.MustFloat64()
    +	for _, cand := range candidates {
    +		if val == cand {
    +			return val
    +		}
    +	}
    +	return defaultVal
    +}
    +
    +// InInt always returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +func (k *Key) InInt(defaultVal int, candidates []int) int {
    +	val := k.MustInt()
    +	for _, cand := range candidates {
    +		if val == cand {
    +			return val
    +		}
    +	}
    +	return defaultVal
    +}
    +
    +// InInt64 always returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
    +	val := k.MustInt64()
    +	for _, cand := range candidates {
    +		if val == cand {
    +			return val
    +		}
    +	}
    +	return defaultVal
    +}
    +
    +// InUint always returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
    +	val := k.MustUint()
    +	for _, cand := range candidates {
    +		if val == cand {
    +			return val
    +		}
    +	}
    +	return defaultVal
    +}
    +
    +// InUint64 always returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
    +	val := k.MustUint64()
    +	for _, cand := range candidates {
    +		if val == cand {
    +			return val
    +		}
    +	}
    +	return defaultVal
    +}
    +
    +// InTimeFormat always parses with given format and returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
    +	val := k.MustTimeFormat(format)
    +	for _, cand := range candidates {
    +		if val == cand {
    +			return val
    +		}
    +	}
    +	return defaultVal
    +}
    +
    +// InTime always parses with RFC3339 format and returns value without error,
    +// it returns default value if error occurs or doesn't fit into candidates.
    +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
    +	return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
    +}
    +
    +// RangeFloat64 checks if value is in given range inclusively,
    +// and returns default value if it's not.
    +// NOTE(review): an unparsable value reads as 0 via MustFloat64, and 0 may
    +// itself fall inside [min, max] — the same caveat applies to the other
    +// Range* methods below.
    +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
    +	val := k.MustFloat64()
    +	if val < min || val > max {
    +		return defaultVal
    +	}
    +	return val
    +}
    +
    +// RangeInt checks if value is in given range inclusively,
    +// and returns default value if it's not.
    +func (k *Key) RangeInt(defaultVal, min, max int) int {
    +	val := k.MustInt()
    +	if val < min || val > max {
    +		return defaultVal
    +	}
    +	return val
    +}
    +
    +// RangeInt64 checks if value is in given range inclusively,
    +// and returns default value if it's not.
    +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
    +	val := k.MustInt64()
    +	if val < min || val > max {
    +		return defaultVal
    +	}
    +	return val
    +}
    +
    +// RangeTimeFormat checks if value with given format is in given range inclusively,
    +// and returns default value if it's not. Comparison is by Unix timestamp.
    +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
    +	val := k.MustTimeFormat(format)
    +	if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
    +		return defaultVal
    +	}
    +	return val
    +}
    +
    +// RangeTime checks if value with RFC3339 format is in given range inclusively,
    +// and returns default value if it's not.
    +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
    +	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
    +}
    +
    +// Strings returns list of string divided by given delimiter.
    +// Each element is whitespace-trimmed; an empty value yields an empty
    +// (non-nil) slice.
    +func (k *Key) Strings(delim string) []string {
    +	str := k.String()
    +	if len(str) == 0 {
    +		return []string{}
    +	}
    +
    +	vals := strings.Split(str, delim)
    +	for i := range vals {
    +		vals[i] = strings.TrimSpace(vals[i])
    +	}
    +	return vals
    +}
    +
    +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
    +func (k *Key) Float64s(delim string) []float64 {
    +	vals, _ := k.getFloat64s(delim, true, false)
    +	return vals
    +}
    +
    +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
    +func (k *Key) Ints(delim string) []int {
    +	vals, _ := k.getInts(delim, true, false)
    +	return vals
    +}
    +
    +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
    +func (k *Key) Int64s(delim string) []int64 {
    +	vals, _ := k.getInt64s(delim, true, false)
    +	return vals
    +}
    +
    +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
    +func (k *Key) Uints(delim string) []uint {
    +	vals, _ := k.getUints(delim, true, false)
    +	return vals
    +}
    +
    +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
    +func (k *Key) Uint64s(delim string) []uint64 {
    +	vals, _ := k.getUint64s(delim, true, false)
    +	return vals
    +}
    +
    +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
    +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
    +func (k *Key) TimesFormat(format, delim string) []time.Time {
    +	vals, _ := k.getTimesFormat(format, delim, true, false)
    +	return vals
    +}
    +
    +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
    +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
    +func (k *Key) Times(delim string) []time.Time {
    +	return k.TimesFormat(time.RFC3339, delim)
    +}
    +
    +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
    +// it will not be included to result list.
    +// (Valid* silently drops unparsable entries; Strict* below fails on the first one.)
    +func (k *Key) ValidFloat64s(delim string) []float64 {
    +	vals, _ := k.getFloat64s(delim, false, false)
    +	return vals
    +}
    +
    +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
    +// not be included to result list.
    +func (k *Key) ValidInts(delim string) []int {
    +	vals, _ := k.getInts(delim, false, false)
    +	return vals
    +}
    +
    +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
    +// then it will not be included to result list.
    +func (k *Key) ValidInt64s(delim string) []int64 {
    +	vals, _ := k.getInt64s(delim, false, false)
    +	return vals
    +}
    +
    +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
    +// then it will not be included to result list.
    +func (k *Key) ValidUints(delim string) []uint {
    +	vals, _ := k.getUints(delim, false, false)
    +	return vals
    +}
    +
    +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
    +// integer, then it will not be included to result list.
    +func (k *Key) ValidUint64s(delim string) []uint64 {
    +	vals, _ := k.getUint64s(delim, false, false)
    +	return vals
    +}
    +
    +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
    +func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
    +	vals, _ := k.getTimesFormat(format, delim, false, false)
    +	return vals
    +}
    +
    +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
    +func (k *Key) ValidTimes(delim string) []time.Time {
    +	return k.ValidTimesFormat(time.RFC3339, delim)
    +}
    +
    +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
    +func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
    +	return k.getFloat64s(delim, false, true)
    +}
    +
    +// StrictInts returns list of int divided by given delimiter or error on first invalid input.
    +func (k *Key) StrictInts(delim string) ([]int, error) {
    +	return k.getInts(delim, false, true)
    +}
    +
    +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
    +func (k *Key) StrictInt64s(delim string) ([]int64, error) {
    +	return k.getInt64s(delim, false, true)
    +}
    +
    +// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
    +func (k *Key) StrictUints(delim string) ([]uint, error) {
    +	return k.getUints(delim, false, true)
    +}
    +
    +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
    +func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
    +	return k.getUint64s(delim, false, true)
    +}
    +
    +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
    +// or error on first invalid input.
    +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
    +	return k.getTimesFormat(format, delim, false, true)
    +}
    +
    +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
    +// or error on first invalid input.
    +func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
    +	return k.StrictTimesFormat(time.RFC3339, delim)
    +}
    +
    +// getFloat64s returns list of float64 divided by given delimiter.
    +// addInvalid: append the zero value for entries that fail to parse.
    +// returnOnInvalid: abort with the parse error on the first bad entry.
    +// (The same two flags govern every get* helper below.)
    +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
    +	strs := k.Strings(delim)
    +	vals := make([]float64, 0, len(strs))
    +	for _, str := range strs {
    +		val, err := strconv.ParseFloat(str, 64)
    +		if err != nil && returnOnInvalid {
    +			return nil, err
    +		}
    +		// On parse error val is the zero value, so addInvalid appends zero.
    +		if err == nil || addInvalid {
    +			vals = append(vals, val)
    +		}
    +	}
    +	return vals, nil
    +}
    +
    +// getInts returns list of int divided by given delimiter.
    +func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
    +	strs := k.Strings(delim)
    +	vals := make([]int, 0, len(strs))
    +	for _, str := range strs {
    +		val, err := strconv.Atoi(str)
    +		if err != nil && returnOnInvalid {
    +			return nil, err
    +		}
    +		if err == nil || addInvalid {
    +			vals = append(vals, val)
    +		}
    +	}
    +	return vals, nil
    +}
    +
    +// getInt64s returns list of int64 divided by given delimiter.
    +func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
    +	strs := k.Strings(delim)
    +	vals := make([]int64, 0, len(strs))
    +	for _, str := range strs {
    +		val, err := strconv.ParseInt(str, 10, 64)
    +		if err != nil && returnOnInvalid {
    +			return nil, err
    +		}
    +		if err == nil || addInvalid {
    +			vals = append(vals, val)
    +		}
    +	}
    +	return vals, nil
    +}
    +
    +// getUints returns list of uint divided by given delimiter.
    +func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
    +	strs := k.Strings(delim)
    +	vals := make([]uint, 0, len(strs))
    +	for _, str := range strs {
    +		val, err := strconv.ParseUint(str, 10, 0)
    +		if err != nil && returnOnInvalid {
    +			return nil, err
    +		}
    +		if err == nil || addInvalid {
    +			vals = append(vals, uint(val))
    +		}
    +	}
    +	return vals, nil
    +}
    +
    +// getUint64s returns list of uint64 divided by given delimiter.
    +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
    +	strs := k.Strings(delim)
    +	vals := make([]uint64, 0, len(strs))
    +	for _, str := range strs {
    +		val, err := strconv.ParseUint(str, 10, 64)
    +		if err != nil && returnOnInvalid {
    +			return nil, err
    +		}
    +		if err == nil || addInvalid {
    +			vals = append(vals, val)
    +		}
    +	}
    +	return vals, nil
    +}
    +
    +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
    +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
    +	strs := k.Strings(delim)
    +	vals := make([]time.Time, 0, len(strs))
    +	for _, str := range strs {
    +		val, err := time.Parse(format, str)
    +		if err != nil && returnOnInvalid {
    +			return nil, err
    +		}
    +		if err == nil || addInvalid {
    +			vals = append(vals, val)
    +		}
    +	}
    +	return vals, nil
    +}
    +
    +// SetValue changes key value and keeps the section's name->value
    +// hash (keysHash) in sync.
    +func (k *Key) SetValue(v string) {
    +	if k.s.f.BlockMode {
    +		k.s.f.lock.Lock()
    +		defer k.s.f.lock.Unlock()
    +	}
    +
    +	k.value = v
    +	k.s.keysHash[k.name] = v
    +}
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/parser.go b/src/prometheus/vendor/github.com/go-ini/ini/parser.go
    new file mode 100644
    index 0000000..dc6df87
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/parser.go
    @@ -0,0 +1,325 @@
    +// Copyright 2015 Unknwon
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License"): you may
    +// not use this file except in compliance with the License. You may obtain
    +// a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +// License for the specific language governing permissions and limitations
    +// under the License.
    +
    +package ini
    +
    +import (
    +	"bufio"
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"strconv"
    +	"strings"
    +	"unicode"
    +)
    +
    +// tokenType enumerates the kinds of tokens the INI parser distinguishes.
    +type tokenType int
    +
    +const (
    +	_TOKEN_INVALID tokenType = iota
    +	_TOKEN_COMMENT
    +	_TOKEN_SECTION
    +	_TOKEN_KEY
    +)
    +
    +// parser reads an INI document from a buffered reader.
    +type parser struct {
    +	buf     *bufio.Reader
    +	isEOF   bool // set once the underlying reader returns io.EOF
    +	count   int  // NOTE(review): starts at 1; appears to number auto-increment keys — confirm with parse()
    +	comment *bytes.Buffer // accumulates comment text encountered while reading
    +}
    +
    +// newParser wraps r in a parser with an empty comment buffer.
    +func newParser(r io.Reader) *parser {
    +	return &parser{
    +		buf:     bufio.NewReader(r),
    +		count:   1,
    +		comment: &bytes.Buffer{},
    +	}
    +}
    +
    +// BOM handles header of BOM-UTF8 format: it consumes the 3-byte UTF-8
    +// byte order mark (EF BB BF) if present, and is a no-op otherwise.
    +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
    +func (p *parser) BOM() error {
    +	mask, err := p.buf.Peek(3)
    +	if err != nil && err != io.EOF {
    +		return err
    +	} else if len(mask) < 3 {
    +		return nil
    +	} else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
    +		// The 3 bytes were just peeked, so this Read only discards them.
    +		p.buf.Read(mask)
    +	}
    +	return nil
    +}
    +
    +// readUntil reads through the next occurrence of delim (inclusive).
    +// Hitting EOF is not an error; it sets p.isEOF and returns what was read.
    +func (p *parser) readUntil(delim byte) ([]byte, error) {
    +	data, err := p.buf.ReadBytes(delim)
    +	if err != nil {
    +		if err == io.EOF {
    +			p.isEOF = true
    +		} else {
    +			return nil, err
    +		}
    +	}
    +	return data, nil
    +}
    +
    +// cleanComment returns the portion of in starting at the first '#' or ';'
    +// marker, and whether such a marker was found.
    +func cleanComment(in []byte) ([]byte, bool) {
    +	i := bytes.IndexAny(in, "#;")
    +	if i == -1 {
    +		return nil, false
    +	}
    +	return in[i:], true
    +}
    +
    +// readKeyName extracts the key name from a line and returns the name,
    +// the offset just past the key-value delimiter ('=' or ':'), and an error
    +// if no closing quote or no delimiter is found. Key names may be wrapped
    +// in backticks, single double-quotes, or triple double-quotes.
    +func readKeyName(in []byte) (string, int, error) {
    +	line := string(in)
    +
    +	// Check if key name surrounded by quotes.
    +	var keyQuote string
    +	if line[0] == '"' {
    +		if len(line) > 6 && string(line[0:3]) == `"""` {
    +			keyQuote = `"""`
    +		} else {
    +			keyQuote = `"`
    +		}
    +	} else if line[0] == '`' {
    +		keyQuote = "`"
    +	}
    +
    +	// Get out key name
    +	endIdx := -1
    +	if len(keyQuote) > 0 {
    +		startIdx := len(keyQuote)
    +		// FIXME: fail case -> """"""name"""=value
    +		pos := strings.Index(line[startIdx:], keyQuote)
    +		if pos == -1 {
    +			return "", -1, fmt.Errorf("missing closing key quote: %s", line)
    +		}
    +		pos += startIdx
    +
    +		// Find key-value delimiter
    +		// (pos+startIdx skips past the closing quote, which is keyQuote long).
    +		i := strings.IndexAny(line[pos+startIdx:], "=:")
    +		if i < 0 {
    +			return "", -1, ErrDelimiterNotFound{line}
    +		}
    +		endIdx = pos + i
    +		return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
    +	}
    +
    +	endIdx = strings.IndexAny(line, "=:")
    +	if endIdx < 0 {
    +		return "", -1, ErrDelimiterNotFound{line}
    +	}
    +	return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
    +}
    +
    +func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
    +	for {
    +		data, err := p.readUntil('\n')
    +		if err != nil {
    +			return "", err
    +		}
    +		next := string(data)
    +
    +		pos := strings.LastIndex(next, valQuote)
    +		if pos > -1 {
    +			val += next[:pos]
    +
    +			comment, has := cleanComment([]byte(next[pos:]))
    +			if has {
    +				p.comment.Write(bytes.TrimSpace(comment))
    +			}
    +			break
    +		}
    +		val += next
    +		if p.isEOF {
    +			return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
    +		}
    +	}
    +	return val, nil
    +}
    +
    +func (p *parser) readContinuationLines(val string) (string, error) {
    +	for {
    +		data, err := p.readUntil('\n')
    +		if err != nil {
    +			return "", err
    +		}
    +		next := strings.TrimSpace(string(data))
    +
    +		if len(next) == 0 {
    +			break
    +		}
    +		val += next
    +		if val[len(val)-1] != '\\' {
    +			break
    +		}
    +		val = val[:len(val)-1]
    +	}
    +	return val, nil
    +}
    +
    +// hasSurroundedQuote reports whether the first and last characters
    +// are quotes \" or \'.
    +// It returns false if any other parts also contain same kind of quotes.
    +func hasSurroundedQuote(in string, quote byte) bool {
    +	return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
    +		strings.IndexByte(in[1:], quote) == len(in)-2
    +}
    +
    +func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
    +	line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
    +	if len(line) == 0 {
    +		return "", nil
    +	}
    +
    +	var valQuote string
    +	if len(line) > 3 && string(line[0:3]) == `"""` {
    +		valQuote = `"""`
    +	} else if line[0] == '`' {
    +		valQuote = "`"
    +	}
    +
    +	if len(valQuote) > 0 {
    +		startIdx := len(valQuote)
    +		pos := strings.LastIndex(line[startIdx:], valQuote)
    +		// Check for multi-line value
    +		if pos == -1 {
    +			return p.readMultilines(line, line[startIdx:], valQuote)
    +		}
    +
    +		return line[startIdx : pos+startIdx], nil
    +	}
    +
    +	// Won't be able to reach here if value only contains whitespace.
    +	line = strings.TrimSpace(line)
    +
    +	// Check continuation lines when desired.
    +	if !ignoreContinuation && line[len(line)-1] == '\\' {
    +		return p.readContinuationLines(line[:len(line)-1])
    +	}
    +
    +	i := strings.IndexAny(line, "#;")
    +	if i > -1 {
    +		p.comment.WriteString(line[i:])
    +		line = strings.TrimSpace(line[:i])
    +	}
    +
    +	// Trim single quotes
    +	if hasSurroundedQuote(line, '\'') ||
    +		hasSurroundedQuote(line, '"') {
    +		line = line[1 : len(line)-1]
    +	}
    +	return line, nil
    +}
    +
    +// parse parses data through an io.Reader.
    +func (f *File) parse(reader io.Reader) (err error) {
    +	p := newParser(reader)
    +	if err = p.BOM(); err != nil {
    +		return fmt.Errorf("BOM: %v", err)
    +	}
    +
    +	// Ignore error because default section name is never empty string.
    +	section, _ := f.NewSection(DEFAULT_SECTION)
    +
    +	var line []byte
    +	for !p.isEOF {
    +		line, err = p.readUntil('\n')
    +		if err != nil {
    +			return err
    +		}
    +
    +		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
    +		if len(line) == 0 {
    +			continue
    +		}
    +
    +		// Comments
    +		if line[0] == '#' || line[0] == ';' {
    +			// Note: we do not care about the ending line break,
    +			// it is needed for adding a second line,
    +			// so just clean it once at the end when set to value.
    +			p.comment.Write(line)
    +			continue
    +		}
    +
    +		// Section
    +		if line[0] == '[' {
    +			// Read to the next ']' (TODO: support quoted strings)
    +			// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
    +			closeIdx := bytes.LastIndex(line, []byte("]"))
    +			if closeIdx == -1 {
    +				return fmt.Errorf("unclosed section: %s", line)
    +			}
    +
    +			name := string(line[1:closeIdx])
    +			section, err = f.NewSection(name)
    +			if err != nil {
    +				return err
    +			}
    +
    +			comment, has := cleanComment(line[closeIdx+1:])
    +			if has {
    +				p.comment.Write(comment)
    +			}
    +
    +			section.Comment = strings.TrimSpace(p.comment.String())
    +
    +		// Reset auto-counter and comments
    +			p.comment.Reset()
    +			p.count = 1
    +			continue
    +		}
    +
    +		kname, offset, err := readKeyName(line)
    +		if err != nil {
    +			// Treat as boolean key when desired, and whole line is key name.
    +			if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
    +				key, err := section.NewKey(string(line), "true")
    +				if err != nil {
    +					return err
    +				}
    +				key.isBooleanType = true
    +				key.Comment = strings.TrimSpace(p.comment.String())
    +				p.comment.Reset()
    +				continue
    +			}
    +			return err
    +		}
    +
    +		// Auto increment.
    +		isAutoIncr := false
    +		if kname == "-" {
    +			isAutoIncr = true
    +			kname = "#" + strconv.Itoa(p.count)
    +			p.count++
    +		}
    +
    +		key, err := section.NewKey(kname, "")
    +		if err != nil {
    +			return err
    +		}
    +		key.isAutoIncrement = isAutoIncr
    +
    +		value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
    +		if err != nil {
    +			return err
    +		}
    +		key.SetValue(value)
    +		key.Comment = strings.TrimSpace(p.comment.String())
    +		p.comment.Reset()
    +	}
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/section.go b/src/prometheus/vendor/github.com/go-ini/ini/section.go
    new file mode 100644
    index 0000000..bbb73ca
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/section.go
    @@ -0,0 +1,206 @@
    +// Copyright 2014 Unknwon
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License"): you may
    +// not use this file except in compliance with the License. You may obtain
    +// a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +// License for the specific language governing permissions and limitations
    +// under the License.
    +
    +package ini
    +
    +import (
    +	"errors"
    +	"fmt"
    +	"strings"
    +)
    +
    +// Section represents a config section.
    +type Section struct {
    +	f        *File
    +	Comment  string
    +	name     string
    +	keys     map[string]*Key
    +	keyList  []string
    +	keysHash map[string]string
    +}
    +
    +func newSection(f *File, name string) *Section {
    +	return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
    +}
    +
    +// Name returns name of Section.
    +func (s *Section) Name() string {
    +	return s.name
    +}
    +
    +// NewKey creates a new key to given section.
    +func (s *Section) NewKey(name, val string) (*Key, error) {
    +	if len(name) == 0 {
    +		return nil, errors.New("error creating new key: empty key name")
    +	} else if s.f.options.Insensitive {
    +		name = strings.ToLower(name)
    +	}
    +
    +	if s.f.BlockMode {
    +		s.f.lock.Lock()
    +		defer s.f.lock.Unlock()
    +	}
    +
    +	if inSlice(name, s.keyList) {
    +		s.keys[name].value = val
    +		return s.keys[name], nil
    +	}
    +
    +	s.keyList = append(s.keyList, name)
    +	s.keys[name] = &Key{
    +		s:     s,
    +		name:  name,
    +		value: val,
    +	}
    +	s.keysHash[name] = val
    +	return s.keys[name], nil
    +}
    +
    +// GetKey returns key in section by given name.
    +func (s *Section) GetKey(name string) (*Key, error) {
    +	// FIXME: change to section level lock?
    +	if s.f.BlockMode {
    +		s.f.lock.RLock()
    +	}
    +	if s.f.options.Insensitive {
    +		name = strings.ToLower(name)
    +	}
    +	key := s.keys[name]
    +	if s.f.BlockMode {
    +		s.f.lock.RUnlock()
    +	}
    +
    +	if key == nil {
    +		// Check if it is a child-section.
    +		sname := s.name
    +		for {
    +			if i := strings.LastIndex(sname, "."); i > -1 {
    +				sname = sname[:i]
    +				sec, err := s.f.GetSection(sname)
    +				if err != nil {
    +					continue
    +				}
    +				return sec.GetKey(name)
    +			} else {
    +				break
    +			}
    +		}
    +		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
    +	}
    +	return key, nil
    +}
    +
    +// HasKey returns true if section contains a key with given name.
    +func (s *Section) HasKey(name string) bool {
    +	key, _ := s.GetKey(name)
    +	return key != nil
    +}
    +
    +// Haskey is a backwards-compatible name for HasKey.
    +func (s *Section) Haskey(name string) bool {
    +	return s.HasKey(name)
    +}
    +
    +// HasValue returns true if section contains given raw value.
    +func (s *Section) HasValue(value string) bool {
    +	if s.f.BlockMode {
    +		s.f.lock.RLock()
    +		defer s.f.lock.RUnlock()
    +	}
    +
    +	for _, k := range s.keys {
    +		if value == k.value {
    +			return true
    +		}
    +	}
    +	return false
    +}
    +
    +// Key assumes named Key exists in section and returns a zero-value when not.
    +func (s *Section) Key(name string) *Key {
    +	key, err := s.GetKey(name)
    +	if err != nil {
    +		// It's OK here because the only possible error is empty key name,
    +		// but if it's empty, this piece of code won't be executed.
    +		key, _ = s.NewKey(name, "")
    +		return key
    +	}
    +	return key
    +}
    +
    +// Keys returns list of keys of section.
    +func (s *Section) Keys() []*Key {
    +	keys := make([]*Key, len(s.keyList))
    +	for i := range s.keyList {
    +		keys[i] = s.Key(s.keyList[i])
    +	}
    +	return keys
    +}
    +
    +// ParentKeys returns list of keys of parent section.
    +func (s *Section) ParentKeys() []*Key {
    +	var parentKeys []*Key
    +	sname := s.name
    +	for {
    +		if i := strings.LastIndex(sname, "."); i > -1 {
    +			sname = sname[:i]
    +			sec, err := s.f.GetSection(sname)
    +			if err != nil {
    +				continue
    +			}
    +			parentKeys = append(parentKeys, sec.Keys()...)
    +		} else {
    +			break
    +		}
    +
    +	}
    +	return parentKeys
    +}
    +
    +// KeyStrings returns list of key names of section.
    +func (s *Section) KeyStrings() []string {
    +	list := make([]string, len(s.keyList))
    +	copy(list, s.keyList)
    +	return list
    +}
    +
    +// KeysHash returns keys hash consisting of names and values.
    +func (s *Section) KeysHash() map[string]string {
    +	if s.f.BlockMode {
    +		s.f.lock.RLock()
    +		defer s.f.lock.RUnlock()
    +	}
    +
    +	hash := map[string]string{}
    +	for key, value := range s.keysHash {
    +		hash[key] = value
    +	}
    +	return hash
    +}
    +
    +// DeleteKey deletes a key from section.
    +func (s *Section) DeleteKey(name string) {
    +	if s.f.BlockMode {
    +		s.f.lock.Lock()
    +		defer s.f.lock.Unlock()
    +	}
    +
    +	for i, k := range s.keyList {
    +		if k == name {
    +			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
    +			delete(s.keys, name)
    +			return
    +		}
    +	}
    +}
    diff --git a/src/prometheus/vendor/github.com/go-ini/ini/struct.go b/src/prometheus/vendor/github.com/go-ini/ini/struct.go
    new file mode 100644
    index 0000000..d00fb4b
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-ini/ini/struct.go
    @@ -0,0 +1,431 @@
    +// Copyright 2014 Unknwon
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License"): you may
    +// not use this file except in compliance with the License. You may obtain
    +// a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +// License for the specific language governing permissions and limitations
    +// under the License.
    +
    +package ini
    +
    +import (
    +	"bytes"
    +	"errors"
    +	"fmt"
    +	"reflect"
    +	"strings"
    +	"time"
    +	"unicode"
    +)
    +
    +// NameMapper represents a ini tag name mapper.
    +type NameMapper func(string) string
    +
    +// Built-in name getters.
    +var (
    +	// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
    +	AllCapsUnderscore NameMapper = func(raw string) string {
    +		newstr := make([]rune, 0, len(raw))
    +		for i, chr := range raw {
    +			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
    +				if i > 0 {
    +					newstr = append(newstr, '_')
    +				}
    +			}
    +			newstr = append(newstr, unicode.ToUpper(chr))
    +		}
    +		return string(newstr)
    +	}
    +	// TitleUnderscore converts to format title_underscore.
    +	TitleUnderscore NameMapper = func(raw string) string {
    +		newstr := make([]rune, 0, len(raw))
    +		for i, chr := range raw {
    +			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
    +				if i > 0 {
    +					newstr = append(newstr, '_')
    +				}
    +				chr -= ('A' - 'a')
    +			}
    +			newstr = append(newstr, chr)
    +		}
    +		return string(newstr)
    +	}
    +)
    +
    +func (s *Section) parseFieldName(raw, actual string) string {
    +	if len(actual) > 0 {
    +		return actual
    +	}
    +	if s.f.NameMapper != nil {
    +		return s.f.NameMapper(raw)
    +	}
    +	return raw
    +}
    +
    +func parseDelim(actual string) string {
    +	if len(actual) > 0 {
    +		return actual
    +	}
    +	return ","
    +}
    +
    +var reflectTime = reflect.TypeOf(time.Now()).Kind()
    +
    +// setSliceWithProperType sets proper values to slice based on its type.
    +func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
    +	strs := key.Strings(delim)
    +	numVals := len(strs)
    +	if numVals == 0 {
    +		return nil
    +	}
    +
    +	var vals interface{}
    +
    +	sliceOf := field.Type().Elem().Kind()
    +	switch sliceOf {
    +	case reflect.String:
    +		vals = strs
    +	case reflect.Int:
    +		vals = key.Ints(delim)
    +	case reflect.Int64:
    +		vals = key.Int64s(delim)
    +	case reflect.Uint:
    +		vals = key.Uints(delim)
    +	case reflect.Uint64:
    +		vals = key.Uint64s(delim)
    +	case reflect.Float64:
    +		vals = key.Float64s(delim)
    +	case reflectTime:
    +		vals = key.Times(delim)
    +	default:
    +		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
    +	}
    +
    +	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
    +	for i := 0; i < numVals; i++ {
    +		switch sliceOf {
    +		case reflect.String:
    +			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
    +		case reflect.Int:
    +			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
    +		case reflect.Int64:
    +			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
    +		case reflect.Uint:
    +			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
    +		case reflect.Uint64:
    +			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
    +		case reflect.Float64:
    +			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
    +		case reflectTime:
    +			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
    +		}
    +	}
    +	field.Set(slice)
    +	return nil
    +}
    +
    +// setWithProperType sets proper value to field based on its type,
    +// but it does not return error for failing parsing,
    +// because we want to use default value that is already assigned to struct.
    +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
    +	switch t.Kind() {
    +	case reflect.String:
    +		if len(key.String()) == 0 {
    +			return nil
    +		}
    +		field.SetString(key.String())
    +	case reflect.Bool:
    +		boolVal, err := key.Bool()
    +		if err != nil {
    +			return nil
    +		}
    +		field.SetBool(boolVal)
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +		durationVal, err := key.Duration()
    +		// Skip zero value
    +		if err == nil && int(durationVal) > 0 {
    +			field.Set(reflect.ValueOf(durationVal))
    +			return nil
    +		}
    +
    +		intVal, err := key.Int64()
    +		if err != nil || intVal == 0 {
    +			return nil
    +		}
    +		field.SetInt(intVal)
    +	//	byte is an alias for uint8, so supporting uint8 breaks support for byte
    +	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
    +		durationVal, err := key.Duration()
    +		// Skip zero value
    +		if err == nil && int(durationVal) > 0 {
    +			field.Set(reflect.ValueOf(durationVal))
    +			return nil
    +		}
    +
    +		uintVal, err := key.Uint64()
    +		if err != nil {
    +			return nil
    +		}
    +		field.SetUint(uintVal)
    +
    +	case reflect.Float64:
    +		floatVal, err := key.Float64()
    +		if err != nil {
    +			return nil
    +		}
    +		field.SetFloat(floatVal)
    +	case reflectTime:
    +		timeVal, err := key.Time()
    +		if err != nil {
    +			return nil
    +		}
    +		field.Set(reflect.ValueOf(timeVal))
    +	case reflect.Slice:
    +		return setSliceWithProperType(key, field, delim)
    +	default:
    +		return fmt.Errorf("unsupported type '%s'", t)
    +	}
    +	return nil
    +}
    +
    +func (s *Section) mapTo(val reflect.Value) error {
    +	if val.Kind() == reflect.Ptr {
    +		val = val.Elem()
    +	}
    +	typ := val.Type()
    +
    +	for i := 0; i < typ.NumField(); i++ {
    +		field := val.Field(i)
    +		tpField := typ.Field(i)
    +
    +		tag := tpField.Tag.Get("ini")
    +		if tag == "-" {
    +			continue
    +		}
    +
    +		opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
    +		fieldName := s.parseFieldName(tpField.Name, opts[0])
    +		if len(fieldName) == 0 || !field.CanSet() {
    +			continue
    +		}
    +
    +		isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
    +		isStruct := tpField.Type.Kind() == reflect.Struct
    +		if isAnonymous {
    +			field.Set(reflect.New(tpField.Type.Elem()))
    +		}
    +
    +		if isAnonymous || isStruct {
    +			if sec, err := s.f.GetSection(fieldName); err == nil {
    +				if err = sec.mapTo(field); err != nil {
    +					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
    +				}
    +				continue
    +			}
    +		}
    +
    +		if key, err := s.GetKey(fieldName); err == nil {
    +			if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
    +				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
    +			}
    +		}
    +	}
    +	return nil
    +}
    +
    +// MapTo maps section to given struct.
    +func (s *Section) MapTo(v interface{}) error {
    +	typ := reflect.TypeOf(v)
    +	val := reflect.ValueOf(v)
    +	if typ.Kind() == reflect.Ptr {
    +		typ = typ.Elem()
    +		val = val.Elem()
    +	} else {
    +		return errors.New("cannot map to non-pointer struct")
    +	}
    +
    +	return s.mapTo(val)
    +}
    +
    +// MapTo maps file to given struct.
    +func (f *File) MapTo(v interface{}) error {
    +	return f.Section("").MapTo(v)
    +}
    +
    +// MapToWithMapper maps data sources to given struct with name mapper.
    +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
    +	cfg, err := Load(source, others...)
    +	if err != nil {
    +		return err
    +	}
    +	cfg.NameMapper = mapper
    +	return cfg.MapTo(v)
    +}
    +
    +// MapTo maps data sources to given struct.
    +func MapTo(v, source interface{}, others ...interface{}) error {
    +	return MapToWithMapper(v, nil, source, others...)
    +}
    +
    +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
    +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
    +	slice := field.Slice(0, field.Len())
    +	if field.Len() == 0 {
    +		return nil
    +	}
    +
    +	var buf bytes.Buffer
    +	sliceOf := field.Type().Elem().Kind()
    +	for i := 0; i < field.Len(); i++ {
    +		switch sliceOf {
    +		case reflect.String:
    +			buf.WriteString(slice.Index(i).String())
    +		case reflect.Int, reflect.Int64:
    +			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
    +		case reflect.Uint, reflect.Uint64:
    +			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
    +		case reflect.Float64:
    +			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
    +		case reflectTime:
    +			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
    +		default:
    +			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
    +		}
    +		buf.WriteString(delim)
    +	}
    +	key.SetValue(buf.String()[:buf.Len()-1])
    +	return nil
    +}
    +
    +// reflectWithProperType does the opposite thing as setWithProperType.
    +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
    +	switch t.Kind() {
    +	case reflect.String:
    +		key.SetValue(field.String())
    +	case reflect.Bool:
    +		key.SetValue(fmt.Sprint(field.Bool()))
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +		key.SetValue(fmt.Sprint(field.Int()))
    +	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
    +		key.SetValue(fmt.Sprint(field.Uint()))
    +	case reflect.Float32, reflect.Float64:
    +		key.SetValue(fmt.Sprint(field.Float()))
    +	case reflectTime:
    +		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
    +	case reflect.Slice:
    +		return reflectSliceWithProperType(key, field, delim)
    +	default:
    +		return fmt.Errorf("unsupported type '%s'", t)
    +	}
    +	return nil
    +}
    +
    +// CR: copied from encoding/json/encode.go with modifications of time.Time support.
    +// TODO: add more test coverage.
    +func isEmptyValue(v reflect.Value) bool {
    +	switch v.Kind() {
    +	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
    +		return v.Len() == 0
    +	case reflect.Bool:
    +		return !v.Bool()
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +		return v.Int() == 0
    +	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
    +		return v.Uint() == 0
    +	case reflect.Float32, reflect.Float64:
    +		return v.Float() == 0
    +	case reflectTime:
    +		return v.Interface().(time.Time).IsZero()
    +	case reflect.Interface, reflect.Ptr:
    +		return v.IsNil()
    +	}
    +	return false
    +}
    +
    +func (s *Section) reflectFrom(val reflect.Value) error {
    +	if val.Kind() == reflect.Ptr {
    +		val = val.Elem()
    +	}
    +	typ := val.Type()
    +
    +	for i := 0; i < typ.NumField(); i++ {
    +		field := val.Field(i)
    +		tpField := typ.Field(i)
    +
    +		tag := tpField.Tag.Get("ini")
    +		if tag == "-" {
    +			continue
    +		}
    +
    +		opts := strings.SplitN(tag, ",", 2)
    +		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
    +			continue
    +		}
    +
    +		fieldName := s.parseFieldName(tpField.Name, opts[0])
    +		if len(fieldName) == 0 || !field.CanSet() {
    +			continue
    +		}
    +
    +		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
    +			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
    +			// Note: The only error here is section doesn't exist.
    +			sec, err := s.f.GetSection(fieldName)
    +			if err != nil {
    +				// Note: fieldName can never be empty here, ignore error.
    +				sec, _ = s.f.NewSection(fieldName)
    +			}
    +			if err = sec.reflectFrom(field); err != nil {
    +				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
    +			}
    +			continue
    +		}
    +
    +		// Note: Same reason as section.
    +		key, err := s.GetKey(fieldName)
    +		if err != nil {
    +			key, _ = s.NewKey(fieldName, "")
    +		}
    +		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
    +			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
    +		}
    +
    +	}
    +	return nil
    +}
    +
    +// ReflectFrom reflects section from given struct.
    +func (s *Section) ReflectFrom(v interface{}) error {
    +	typ := reflect.TypeOf(v)
    +	val := reflect.ValueOf(v)
    +	if typ.Kind() == reflect.Ptr {
    +		typ = typ.Elem()
    +		val = val.Elem()
    +	} else {
    +		return errors.New("cannot reflect from non-pointer struct")
    +	}
    +
    +	return s.reflectFrom(val)
    +}
    +
    +// ReflectFrom reflects file from given struct.
    +func (f *File) ReflectFrom(v interface{}) error {
    +	return f.Section("").ReflectFrom(v)
    +}
    +
    +// ReflectFromWithMapper reflects data sources from given struct with name mapper.
    +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
    +	cfg.NameMapper = mapper
    +	return cfg.ReflectFrom(v)
    +}
    +
    +// ReflectFrom reflects data sources from given struct.
    +func ReflectFrom(cfg *File, v interface{}) error {
    +	return ReflectFromWithMapper(cfg, v, nil)
    +}
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/LICENSE b/src/prometheus/vendor/github.com/go-kit/kit/LICENSE
    new file mode 100644
    index 0000000..9d83342
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/LICENSE
    @@ -0,0 +1,22 @@
    +The MIT License (MIT)
    +
    +Copyright (c) 2015 Peter Bourgon
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/README.md b/src/prometheus/vendor/github.com/go-kit/kit/log/README.md
    new file mode 100644
    index 0000000..7222f80
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/README.md
    @@ -0,0 +1,147 @@
    +# package log
    +
    +`package log` provides a minimal interface for structured logging in services.
    +It may be wrapped to encode conventions, enforce type-safety, provide leveled
    +logging, and so on. It can be used for both typical application log events,
    +and log-structured data streams.
    +
    +## Structured logging
    +
    +Structured logging is, basically, conceding to the reality that logs are
    +_data_, and warrant some level of schematic rigor. Using a stricter,
    +key/value-oriented message format for our logs, containing contextual and
    +semantic information, makes it much easier to get insight into the
    +operational activity of the systems we build. Consequently, `package log` is
    +of the strong belief that "[the benefits of structured logging outweigh the
    +minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".
    +
    +Migrating from unstructured to structured logging is probably a lot easier
    +than you'd expect.
    +
    +```go
    +// Unstructured
    +log.Printf("HTTP server listening on %s", addr)
    +
    +// Structured
    +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
    +```
    +
    +## Usage
    +
    +### Typical application logging
    +
    +```go
    +w := log.NewSyncWriter(os.Stderr)
    +logger := log.NewLogfmtLogger(w)
    +logger.Log("question", "what is the meaning of life?", "answer", 42)
    +
    +// Output:
    +// question="what is the meaning of life?" answer=42
    +```
    +
    +### Contextual Loggers
    +
    +```go
    +func main() {
    +	var logger log.Logger
    +	logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
    +	logger = log.With(logger, "instance_id", 123)
    +
    +	logger.Log("msg", "starting")
    +	NewWorker(log.With(logger, "component", "worker")).Run()
    +	NewSlacker(log.With(logger, "component", "slacker")).Run()
    +}
    +
    +// Output:
    +// instance_id=123 msg=starting
    +// instance_id=123 component=worker msg=running
    +// instance_id=123 component=slacker msg=running
    +```
    +
    +### Interact with stdlib logger
    +
    +Redirect stdlib logger to Go kit logger.
    +
    +```go
    +import (
    +	"os"
    +	stdlog "log"
    +	kitlog "github.com/go-kit/kit/log"
    +)
    +
    +func main() {
    +	logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
    +	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
    +	stdlog.Print("I sure like pie")
    +}
    +
    +// Output:
    +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
    +```
    +
    +Or, if, for legacy reasons, you need to pipe all of your logging through the
    +stdlib log package, you can redirect Go kit logger to the stdlib logger.
    +
    +```go
    +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
    +logger.Log("legacy", true, "msg", "at least it's something")
    +
    +// Output:
    +// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
    +```
    +
    +### Timestamps and callers
    +
    +```go
    +var logger log.Logger
    +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
    +logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
    +
    +logger.Log("msg", "hello")
    +
    +// Output:
    +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
    +```
    +
    +## Supported output formats
    +
    +- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
    +- JSON
    +
    +## Enhancements
    +
    +`package log` is centered on the one-method Logger interface.
    +
    +```go
    +type Logger interface {
    +	Log(keyvals ...interface{}) error
    +}
    +```
    +
    +This interface, and its supporting code like is the product of much iteration
    +and evaluation. For more details on the evolution of the Logger interface,
    +see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
    +a talk by [Chris Hines](https://github.com/ChrisHines).
    +Also, please see
    +[#63](https://github.com/go-kit/kit/issues/63),
    +[#76](https://github.com/go-kit/kit/pull/76),
    +[#131](https://github.com/go-kit/kit/issues/131),
    +[#157](https://github.com/go-kit/kit/pull/157),
    +[#164](https://github.com/go-kit/kit/issues/164), and
    +[#252](https://github.com/go-kit/kit/pull/252)
    +to review historical conversations about package log and the Logger interface.
    +
    +Value-add packages and suggestions,
    +like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
    +are of course welcome. Good proposals should
    +
    +- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
    +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
    +- Be friendly to packages that accept only an unadorned log.Logger.
    +
    +## Benchmarks & comparisons
    +
    +There are a few Go logging benchmarks and comparisons that include Go kit's package log.
    +
    +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
    +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/doc.go b/src/prometheus/vendor/github.com/go-kit/kit/log/doc.go
    new file mode 100644
    index 0000000..918c0af
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/doc.go
    @@ -0,0 +1,116 @@
    +// Package log provides a structured logger.
    +//
    +// Structured logging produces logs easily consumed later by humans or
    +// machines. Humans might be interested in debugging errors, or tracing
    +// specific requests. Machines might be interested in counting interesting
    +// events, or aggregating information for off-line processing. In both cases,
    +// it is important that the log messages are structured and actionable.
    +// Package log is designed to encourage both of these best practices.
    +//
    +// Basic Usage
    +//
    +// The fundamental interface is Logger. Loggers create log events from
    +// key/value data. The Logger interface has a single method, Log, which
    +// accepts a sequence of alternating key/value pairs, which this package names
    +// keyvals.
    +//
    +//    type Logger interface {
    +//        Log(keyvals ...interface{}) error
    +//    }
    +//
    +// Here is an example of a function using a Logger to create log events.
    +//
    +//    func RunTask(task Task, logger log.Logger) string {
    +//        logger.Log("taskID", task.ID, "event", "starting task")
    +//        ...
    +//        logger.Log("taskID", task.ID, "event", "task complete")
    +//    }
    +//
    +// The keys in the above example are "taskID" and "event". The values are
    +// task.ID, "starting task", and "task complete". Every key is followed
    +// immediately by its value.
    +//
    +// Keys are usually plain strings. Values may be any type that has a sensible
    +// encoding in the chosen log format. With structured logging it is a good
    +// idea to log simple values without formatting them. This practice allows
    +// the chosen logger to encode values in the most appropriate way.
    +//
    +// Contextual Loggers
    +//
    +// A contextual logger stores keyvals that it includes in all log events.
    +// Building appropriate contextual loggers reduces repetition and aids
    +// consistency in the resulting log output. With and WithPrefix add context to
    +// a logger. We can use With to improve the RunTask example.
    +//
    +//    func RunTask(task Task, logger log.Logger) string {
    +//        logger = log.With(logger, "taskID", task.ID)
    +//        logger.Log("event", "starting task")
    +//        ...
    +//        taskHelper(task.Cmd, logger)
    +//        ...
    +//        logger.Log("event", "task complete")
    +//    }
    +//
    +// The improved version emits the same log events as the original for the
    +// first and last calls to Log. Passing the contextual logger to taskHelper
    +// enables each log event created by taskHelper to include the task.ID even
    +// though taskHelper does not have access to that value. Using contextual
    +// loggers this way simplifies producing log output that enables tracing the
    +// life cycle of individual tasks. (See the Contextual example for the full
    +// code of the above snippet.)
    +//
    +// Dynamic Contextual Values
    +//
    +// A Valuer function stored in a contextual logger generates a new value each
    +// time an event is logged. The Valuer example demonstrates how this feature
    +// works.
    +//
    +// Valuers provide the basis for consistently logging timestamps and source
    +// code location. The log package defines several valuers for that purpose.
    +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
    +// DefaultCaller. A common logger initialization sequence that ensures all log
    +// entries contain a timestamp and source location looks like this:
    +//
    +//    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
    +//    logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
    +//
    +// Concurrent Safety
    +//
    +// Applications with multiple goroutines want each log event written to the
    +// same logger to remain separate from other log events. Package log provides
    +// two simple solutions for concurrent safe logging.
    +//
    +// NewSyncWriter wraps an io.Writer and serializes each call to its Write
    +// method. Using a SyncWriter has the benefit that the smallest practical
    +// portion of the logging logic is performed within a mutex, but it requires
    +// the formatting Logger to make only one call to Write per log event.
    +//
    +// NewSyncLogger wraps any Logger and serializes each call to its Log method.
    +// Using a SyncLogger has the benefit that it guarantees each log event is
    +// handled atomically within the wrapped logger, but it typically serializes
    +// both the formatting and output logic. Use a SyncLogger if the formatting
    +// logger may perform multiple writes per log event.
    +//
    +// Error Handling
    +//
    +// This package relies on the practice of wrapping or decorating loggers with
    +// other loggers to provide composable pieces of functionality. It also means
    +// that Logger.Log must return an error because some
    +// implementations—especially those that output log data to an io.Writer—may
    +// encounter errors that cannot be handled locally. This in turn means that
    +// Loggers that wrap other loggers should return errors from the wrapped
    +// logger up the stack.
    +//
    +// Fortunately, the decorator pattern also provides a way to avoid the
    +// necessity to check for errors every time an application calls Logger.Log.
    +// An application required to panic whenever its Logger encounters
    +// an error could initialize its logger as follows.
    +//
    +//    fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
    +//    logger := log.LoggerFunc(func(keyvals ...interface{}) error {
    +//        if err := fmtlogger.Log(keyvals...); err != nil {
    +//            panic(err)
    +//        }
    +//        return nil
    +//    })
    +package log
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/json_logger.go b/src/prometheus/vendor/github.com/go-kit/kit/log/json_logger.go
    new file mode 100644
    index 0000000..231e099
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/json_logger.go
    @@ -0,0 +1,92 @@
    +package log
    +
    +import (
    +	"encoding"
    +	"encoding/json"
    +	"fmt"
    +	"io"
    +	"reflect"
    +)
    +
    +type jsonLogger struct {
    +	io.Writer
    +}
    +
    +// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
    +// single JSON object. Each log event produces no more than one call to
    +// w.Write. The passed Writer must be safe for concurrent use by multiple
    +// goroutines if the returned Logger will be used concurrently.
    +func NewJSONLogger(w io.Writer) Logger {
    +	return &jsonLogger{w}
    +}
    +
    +func (l *jsonLogger) Log(keyvals ...interface{}) error {
    +	n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
    +	m := make(map[string]interface{}, n)
    +	for i := 0; i < len(keyvals); i += 2 {
    +		k := keyvals[i]
    +		var v interface{} = ErrMissingValue
    +		if i+1 < len(keyvals) {
    +			v = keyvals[i+1]
    +		}
    +		merge(m, k, v)
    +	}
    +	return json.NewEncoder(l.Writer).Encode(m)
    +}
    +
    +func merge(dst map[string]interface{}, k, v interface{}) {
    +	var key string
    +	switch x := k.(type) {
    +	case string:
    +		key = x
    +	case fmt.Stringer:
    +		key = safeString(x)
    +	default:
    +		key = fmt.Sprint(x)
    +	}
    +	if x, ok := v.(error); ok {
    +		v = safeError(x)
    +	}
    +
    +	// We want json.Marshaler and encoding.TextMarshaler to take priority over
    +	// err.Error() and v.String(). But json.Marshal (called later) does that by
    +	// default so we force a no-op if it's one of those 2 cases.
    +	switch x := v.(type) {
    +	case json.Marshaler:
    +	case encoding.TextMarshaler:
    +	case error:
    +		v = safeError(x)
    +	case fmt.Stringer:
    +		v = safeString(x)
    +	}
    +
    +	dst[key] = v
    +}
    +
    +func safeString(str fmt.Stringer) (s string) {
    +	defer func() {
    +		if panicVal := recover(); panicVal != nil {
    +			if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
    +				s = "NULL"
    +			} else {
    +				panic(panicVal)
    +			}
    +		}
    +	}()
    +	s = str.String()
    +	return
    +}
    +
    +func safeError(err error) (s interface{}) {
    +	defer func() {
    +		if panicVal := recover(); panicVal != nil {
    +			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
    +				s = nil
    +			} else {
    +				panic(panicVal)
    +			}
    +		}
    +	}()
    +	s = err.Error()
    +	return
    +}
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/level/doc.go b/src/prometheus/vendor/github.com/go-kit/kit/log/level/doc.go
    new file mode 100644
    index 0000000..5e9df7f
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/level/doc.go
    @@ -0,0 +1,22 @@
    +// Package level implements leveled logging on top of package log. To use the
    +// level package, create a logger as per normal in your func main, and wrap it
    +// with level.NewFilter.
    +//
    +//    var logger log.Logger
    +//    logger = log.NewLogfmtLogger(os.Stderr)
    +//    logger = level.NewFilter(logger, level.AllowInfoAndAbove()) // <--
    +//    logger = log.With(logger, "ts", log.DefaultTimestampUTC)
    +//
    +// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
    +// helper methods to emit leveled log events.
    +//
    +//    logger.Log("foo", "bar") // as normal, no level
    +//    level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get())
    +//    if value > 100 {
    +//        level.Error(logger).Log("value", value)
    +//    }
    +//
    +// NewFilter allows precise control over what happens when a log event is
    +// emitted without a level key, or if a squelched level is used. Check the
    +// Option functions for details.
    +package level
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/level/level.go b/src/prometheus/vendor/github.com/go-kit/kit/log/level/level.go
    new file mode 100644
    index 0000000..6833b0d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/level/level.go
    @@ -0,0 +1,205 @@
    +package level
    +
    +import "github.com/go-kit/kit/log"
    +
    +// Error returns a logger that includes a Key/ErrorValue pair.
    +func Error(logger log.Logger) log.Logger {
    +	return log.WithPrefix(logger, Key(), ErrorValue())
    +}
    +
    +// Warn returns a logger that includes a Key/WarnValue pair.
    +func Warn(logger log.Logger) log.Logger {
    +	return log.WithPrefix(logger, Key(), WarnValue())
    +}
    +
    +// Info returns a logger that includes a Key/InfoValue pair.
    +func Info(logger log.Logger) log.Logger {
    +	return log.WithPrefix(logger, Key(), InfoValue())
    +}
    +
    +// Debug returns a logger that includes a Key/DebugValue pair.
    +func Debug(logger log.Logger) log.Logger {
    +	return log.WithPrefix(logger, Key(), DebugValue())
    +}
    +
    +// NewFilter wraps next and implements level filtering. See the commentary on
    +// the Option functions for a detailed description of how to configure levels.
    +// If no options are provided, all leveled log events created with Debug,
    +// Info, Warn or Error helper methods are squelched and non-leveled log
    +// events are passed to next unmodified.
    +func NewFilter(next log.Logger, options ...Option) log.Logger {
    +	l := &logger{
    +		next: next,
    +	}
    +	for _, option := range options {
    +		option(l)
    +	}
    +	return l
    +}
    +
    +type logger struct {
    +	next           log.Logger
    +	allowed        level
    +	squelchNoLevel bool
    +	errNotAllowed  error
    +	errNoLevel     error
    +}
    +
    +func (l *logger) Log(keyvals ...interface{}) error {
    +	var hasLevel, levelAllowed bool
    +	for i := 1; i < len(keyvals); i += 2 {
    +		if v, ok := keyvals[i].(*levelValue); ok {
    +			hasLevel = true
    +			levelAllowed = l.allowed&v.level != 0
    +			break
    +		}
    +	}
    +	if !hasLevel && l.squelchNoLevel {
    +		return l.errNoLevel
    +	}
    +	if hasLevel && !levelAllowed {
    +		return l.errNotAllowed
    +	}
    +	return l.next.Log(keyvals...)
    +}
    +
    +// Option sets a parameter for the leveled logger.
    +type Option func(*logger)
    +
    +// AllowAll is an alias for AllowDebug.
    +func AllowAll() Option {
    +	return AllowDebug()
    +}
    +
    +// AllowDebug allows error, warn, info and debug level log events to pass.
    +func AllowDebug() Option {
    +	return allowed(levelError | levelWarn | levelInfo | levelDebug)
    +}
    +
    +// AllowInfo allows error, warn and info level log events to pass.
    +func AllowInfo() Option {
    +	return allowed(levelError | levelWarn | levelInfo)
    +}
    +
    +// AllowWarn allows error and warn level log events to pass.
    +func AllowWarn() Option {
    +	return allowed(levelError | levelWarn)
    +}
    +
    +// AllowError allows only error level log events to pass.
    +func AllowError() Option {
    +	return allowed(levelError)
    +}
    +
    +// AllowNone allows no leveled log events to pass.
    +func AllowNone() Option {
    +	return allowed(0)
    +}
    +
    +func allowed(allowed level) Option {
    +	return func(l *logger) { l.allowed = allowed }
    +}
    +
    +// ErrNotAllowed sets the error to return from Log when it squelches a log
    +// event disallowed by the configured Allow[Level] option. By default,
    +// ErrNotAllowed is nil; in this case the log event is squelched with no
    +// error.
    +func ErrNotAllowed(err error) Option {
    +	return func(l *logger) { l.errNotAllowed = err }
    +}
    +
    +// SquelchNoLevel instructs Log to squelch log events with no level, so that
    +// they don't proceed through to the wrapped logger. If SquelchNoLevel is set
    +// to true and a log event is squelched in this way, the error value
    +// configured with ErrNoLevel is returned to the caller.
    +func SquelchNoLevel(squelch bool) Option {
    +	return func(l *logger) { l.squelchNoLevel = squelch }
    +}
    +
    +// ErrNoLevel sets the error to return from Log when it squelches a log event
    +// with no level. By default, ErrNoLevel is nil; in this case the log event is
    +// squelched with no error.
    +func ErrNoLevel(err error) Option {
    +	return func(l *logger) { l.errNoLevel = err }
    +}
    +
    +// NewInjector wraps next and returns a logger that adds a Key/level pair to
    +// the beginning of log events that don't already contain a level. In effect,
    +// this gives a default level to logs without a level.
    +func NewInjector(next log.Logger, level Value) log.Logger {
    +	return &injector{
    +		next:  next,
    +		level: level,
    +	}
    +}
    +
    +type injector struct {
    +	next  log.Logger
    +	level interface{}
    +}
    +
    +func (l *injector) Log(keyvals ...interface{}) error {
    +	for i := 1; i < len(keyvals); i += 2 {
    +		if _, ok := keyvals[i].(*levelValue); ok {
    +			return l.next.Log(keyvals...)
    +		}
    +	}
    +	kvs := make([]interface{}, len(keyvals)+2)
    +	kvs[0], kvs[1] = key, l.level
    +	copy(kvs[2:], keyvals)
    +	return l.next.Log(kvs...)
    +}
    +
    +// Value is the interface that each of the canonical level values implement.
    +// It contains unexported methods that prevent types from other packages from
    +// implementing it and guaranteeing that NewFilter can distinguish the levels
    +// defined in this package from all other values.
    +type Value interface {
    +	String() string
    +	levelVal()
    +}
    +
    +// Key returns the unique key added to log events by the loggers in this
    +// package.
    +func Key() interface{} { return key }
    +
    +// ErrorValue returns the unique value added to log events by Error.
    +func ErrorValue() Value { return errorValue }
    +
    +// WarnValue returns the unique value added to log events by Warn.
    +func WarnValue() Value { return warnValue }
    +
    +// InfoValue returns the unique value added to log events by Info.
    +func InfoValue() Value { return infoValue }
    +
    +// DebugValue returns the unique value added to log events by Debug.
    +
    +
    +var (
    +	// key is of type interface{} so that it allocates once during package
    +	// initialization and avoids allocating every time the value is added to a
    +	// []interface{} later.
    +	key interface{} = "level"
    +
    +	errorValue = &levelValue{level: levelError, name: "error"}
    +	warnValue  = &levelValue{level: levelWarn, name: "warn"}
    +	infoValue  = &levelValue{level: levelInfo, name: "info"}
    +	debugValue = &levelValue{level: levelDebug, name: "debug"}
    +)
    +
    +type level byte
    +
    +const (
    +	levelDebug level = 1 << iota
    +	levelInfo
    +	levelWarn
    +	levelError
    +)
    +
    +type levelValue struct {
    +	name string
    +	level
    +}
    +
    +func (v *levelValue) String() string { return v.name }
    +func (v *levelValue) levelVal()      {}
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/log.go b/src/prometheus/vendor/github.com/go-kit/kit/log/log.go
    new file mode 100644
    index 0000000..66a9e2f
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/log.go
    @@ -0,0 +1,135 @@
    +package log
    +
    +import "errors"
    +
    +// Logger is the fundamental interface for all log operations. Log creates a
    +// log event from keyvals, a variadic sequence of alternating keys and values.
    +// Implementations must be safe for concurrent use by multiple goroutines. In
    +// particular, any implementation of Logger that appends to keyvals or
    +// modifies or retains any of its elements must make a copy first.
    +type Logger interface {
    +	Log(keyvals ...interface{}) error
    +}
    +
    +// ErrMissingValue is appended to keyvals slices with odd length to substitute
    +// the missing value.
    +var ErrMissingValue = errors.New("(MISSING)")
    +
    +// With returns a new contextual logger with keyvals prepended to those passed
    +// to calls to Log. If logger is also a contextual logger created by With or
    +// WithPrefix, keyvals is appended to the existing context.
    +//
    +// The returned Logger replaces all value elements (odd indexes) containing a
    +// Valuer with their generated value for each call to its Log method.
    +func With(logger Logger, keyvals ...interface{}) Logger {
    +	if len(keyvals) == 0 {
    +		return logger
    +	}
    +	l := newContext(logger)
    +	kvs := append(l.keyvals, keyvals...)
    +	if len(kvs)%2 != 0 {
    +		kvs = append(kvs, ErrMissingValue)
    +	}
    +	return &context{
    +		logger: l.logger,
    +		// Limiting the capacity of the stored keyvals ensures that a new
    +		// backing array is created if the slice must grow in Log or With.
    +		// Using the extra capacity without copying risks a data race that
    +		// would violate the Logger interface contract.
    +		keyvals:   kvs[:len(kvs):len(kvs)],
    +		hasValuer: l.hasValuer || containsValuer(keyvals),
    +	}
    +}
    +
    +// WithPrefix returns a new contextual logger with keyvals prepended to those
    +// passed to calls to Log. If logger is also a contextual logger created by
    +// With or WithPrefix, keyvals is prepended to the existing context.
    +//
    +// The returned Logger replaces all value elements (odd indexes) containing a
    +// Valuer with their generated value for each call to its Log method.
    +func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
    +	if len(keyvals) == 0 {
    +		return logger
    +	}
    +	l := newContext(logger)
    +	// Limiting the capacity of the stored keyvals ensures that a new
    +	// backing array is created if the slice must grow in Log or With.
    +	// Using the extra capacity without copying risks a data race that
    +	// would violate the Logger interface contract.
    +	n := len(l.keyvals) + len(keyvals)
    +	if len(keyvals)%2 != 0 {
    +		n++
    +	}
    +	kvs := make([]interface{}, 0, n)
    +	kvs = append(kvs, keyvals...)
    +	if len(kvs)%2 != 0 {
    +		kvs = append(kvs, ErrMissingValue)
    +	}
    +	kvs = append(kvs, l.keyvals...)
    +	return &context{
    +		logger:    l.logger,
    +		keyvals:   kvs,
    +		hasValuer: l.hasValuer || containsValuer(keyvals),
    +	}
    +}
    +
    +// context is the Logger implementation returned by With and WithPrefix. It
    +// wraps a Logger and holds keyvals that it includes in all log events. Its
    +// Log method calls bindValues to generate values for each Valuer in the
    +// context keyvals.
    +//
    +// A context must always have the same number of stack frames between calls to
    +// its Log method and the eventual binding of Valuers to their value. This
    +// requirement comes from the functional requirement to allow a context to
    +// resolve application call site information for a Caller stored in the
    +// context. To do this we must be able to predict the number of logging
    +// functions on the stack when bindValues is called.
    +//
    +// Two implementation details provide the needed stack depth consistency.
    +//
    +//    1. newContext avoids introducing an additional layer when asked to
    +//       wrap another context.
    +//    2. With and WithPrefix avoid introducing an additional layer by
    +//       returning a newly constructed context with a merged keyvals rather
    +//       than simply wrapping the existing context.
    +type context struct {
    +	logger    Logger
    +	keyvals   []interface{}
    +	hasValuer bool
    +}
    +
    +func newContext(logger Logger) *context {
    +	if c, ok := logger.(*context); ok {
    +		return c
    +	}
    +	return &context{logger: logger}
    +}
    +
    +// Log replaces all value elements (odd indexes) containing a Valuer in the
    +// stored context with their generated value, appends keyvals, and passes the
    +// result to the wrapped Logger.
    +func (l *context) Log(keyvals ...interface{}) error {
    +	kvs := append(l.keyvals, keyvals...)
    +	if len(kvs)%2 != 0 {
    +		kvs = append(kvs, ErrMissingValue)
    +	}
    +	if l.hasValuer {
    +		// If no keyvals were appended above then we must copy l.keyvals so
    +		// that future log events will reevaluate the stored Valuers.
    +		if len(keyvals) == 0 {
    +			kvs = append([]interface{}{}, l.keyvals...)
    +		}
    +		bindValues(kvs[:len(l.keyvals)])
    +	}
    +	return l.logger.Log(kvs...)
    +}
    +
    +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
    +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
    +// object that calls f.
    +type LoggerFunc func(...interface{}) error
    +
    +// Log implements Logger by calling f(keyvals...).
    +func (f LoggerFunc) Log(keyvals ...interface{}) error {
    +	return f(keyvals...)
    +}
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/src/prometheus/vendor/github.com/go-kit/kit/log/logfmt_logger.go
    new file mode 100644
    index 0000000..a003052
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/logfmt_logger.go
    @@ -0,0 +1,62 @@
    +package log
    +
    +import (
    +	"bytes"
    +	"io"
    +	"sync"
    +
    +	"github.com/go-logfmt/logfmt"
    +)
    +
    +type logfmtEncoder struct {
    +	*logfmt.Encoder
    +	buf bytes.Buffer
    +}
    +
    +func (l *logfmtEncoder) Reset() {
    +	l.Encoder.Reset()
    +	l.buf.Reset()
    +}
    +
    +var logfmtEncoderPool = sync.Pool{
    +	New: func() interface{} {
    +		var enc logfmtEncoder
    +		enc.Encoder = logfmt.NewEncoder(&enc.buf)
    +		return &enc
    +	},
    +}
    +
    +type logfmtLogger struct {
    +	w io.Writer
    +}
    +
    +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
    +// logfmt format. Each log event produces no more than one call to w.Write.
    +// The passed Writer must be safe for concurrent use by multiple goroutines if
    +// the returned Logger will be used concurrently.
    +func NewLogfmtLogger(w io.Writer) Logger {
    +	return &logfmtLogger{w}
    +}
    +
    +func (l logfmtLogger) Log(keyvals ...interface{}) error {
    +	enc := logfmtEncoderPool.Get().(*logfmtEncoder)
    +	enc.Reset()
    +	defer logfmtEncoderPool.Put(enc)
    +
    +	if err := enc.EncodeKeyvals(keyvals...); err != nil {
    +		return err
    +	}
    +
    +	// Add newline to the end of the buffer
    +	if err := enc.EndRecord(); err != nil {
    +		return err
    +	}
    +
    +	// The Logger interface requires implementations to be safe for concurrent
    +	// use by multiple goroutines. For this implementation that means making
    +	// only one call to l.w.Write() for each call to Log.
    +	if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/nop_logger.go b/src/prometheus/vendor/github.com/go-kit/kit/log/nop_logger.go
    new file mode 100644
    index 0000000..1047d62
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/nop_logger.go
    @@ -0,0 +1,8 @@
    +package log
    +
    +type nopLogger struct{}
    +
    +// NewNopLogger returns a logger that doesn't do anything.
    +func NewNopLogger() Logger { return nopLogger{} }
    +
    +func (nopLogger) Log(...interface{}) error { return nil }
    diff --git a/src/prometheus/vendor/github.com/go-kit/kit/log/stdlib.go b/src/prometheus/vendor/github.com/go-kit/kit/log/stdlib.go
    new file mode 100644
    index 0000000..ff96b5d
    --- /dev/null
    +++ b/src/prometheus/vendor/github.com/go-kit/kit/log/stdlib.go
    @@ -0,0 +1,116 @@
    +package log
    +
    +import (
    +	"io"
    +	"log"
    +	"regexp"
    +	"strings"
    +)
    +
    +// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
    +// designed to be passed to a Go kit logger as the writer, for cases where
    +// it's necessary to redirect all Go kit log output to the stdlib logger.
    +//
    +// If you have any choice in the matter, you shouldn't use this. Prefer to
    +// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
    +type StdlibWriter struct{}
    +
    +// Write implements io.Writer.
    +func (w StdlibWriter) Write(p []byte) (int, error) {
    +	log.Print(strings.TrimSpace(string(p)))
    +	return len(p), nil
    +}
    +
    +// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
    +// logger's SetOutput. It will extract date/timestamps, filenames, and
    +// messages, and place them under relevant keys.
    +type StdlibAdapter struct {
    +	Logger
    +	timestampKey string
    +	fileKey      string
    +	messageKey   string
    +}
    +
    +// StdlibAdapterOption sets a parameter for the StdlibAdapter.
    +type StdlibAdapterOption func(*StdlibAdapter)
    +
    +// TimestampKey sets the key for the timestamp field. By default, it's "ts".
    +func TimestampKey(key string) StdlibAdapterOption {
    +	return func(a *StdlibAdapter) { a.timestampKey = key }
    +}
    +
    +// FileKey sets the key for the file and line field. By default, it's "caller".
    +func FileKey(key string) StdlibAdapterOption {
    +	return func(a *StdlibAdapter) { a.fileKey = key }
    +}
    +
    +// MessageKey sets the key for the actual log message. By default, it's "msg".
    +func MessageKey(key string) StdlibAdapterOption {
    +	return func(a *StdlibAdapter) { a.messageKey = key }
    +}
    +
    +// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
    +// logger. It's designed to be passed to log.SetOutput.
    +func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
    +	a := StdlibAdapter{
    +		Logger:       logger,
    +		timestampKey: "ts",
    +		fileKey:      "caller",
    +		messageKey:   "msg",
    +	}
    +	for _, option := range options {
    +		option(&a)
    +	}
    +	return a
    +}
    +
    +func (a StdlibAdapter) Write(p []byte) (int, error) {
    +	result := subexps(p)
    +	keyvals := []interface{}{}
    +	var timestamp string
    +	if date, ok := result["date"]; ok && date != "" {
    +		timestamp = date
    +	}
    +	if time, ok := result["time"]; ok && time != "" {
    +		if timestamp != "" {
    +			timestamp += " "
    +		}
    +		timestamp += time
    +	}
    +	if timestamp != "" {
    +		keyvals = append(keyvals, a.timestampKey, timestamp)
    +	}
    +	if file, ok := result["file"]; ok && file != "" {
    +		keyvals = append(keyvals, a.fileKey, file)
    +	}
    +	if msg, ok := result["msg"]; ok {
    +		keyvals = append(keyvals, a.messageKey, msg)
    +	}
    +	if err := a.Logger.Log(keyvals...); err != nil {
    +		return 0, err
    +	}
    +	return len(p), nil
    +}
    +
    +const (
    +	logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
    +	logRegexpTime = `(?P
    + + {{$alertStateToRowClass := .AlertStateToRowClass}} + {{range .AlertingRules}} + {{$activeAlerts := .ActiveAlerts}} + + + + + + + {{else}} + + + + {{end}} + +
    {{.Name}} ({{len $activeAlerts}} active)
    +
    +
    {{.HTMLSnippet pathPrefix}}
    +
    + {{if $activeAlerts}} + + + + + + + + {{range $activeAlerts}} + + + + + + + {{ if .Annotations.Map}} + + + + + + + {{end}} + {{end}} +
    LabelsStateActive SinceValue
    + {{range $label, $value := .Labels.Map}} + {{$label}}="{{$value}}" + {{end}} + {{.State}}{{.ActiveAt.UTC}}{{.Value}}
    + {{end}} +
    + No alerting rules defined +
    + +{{end}} diff --git a/src/prometheus/web/ui/templates/config.html b/src/prometheus/web/ui/templates/config.html new file mode 100644 index 0000000..50d392a --- /dev/null +++ b/src/prometheus/web/ui/templates/config.html @@ -0,0 +1,8 @@ +{{define "head"}}{{end}} + +{{define "content"}} +
    +

    Configuration

    +
    {{.}}
    +
    +{{end}} diff --git a/src/prometheus/web/ui/templates/flags.html b/src/prometheus/web/ui/templates/flags.html new file mode 100644 index 0000000..6228cdb --- /dev/null +++ b/src/prometheus/web/ui/templates/flags.html @@ -0,0 +1,17 @@ +{{define "head"}}{{end}} + +{{define "content"}} +
    +

    Command-Line Flags

    + + + {{range $key, $value := . }} + + + + + {{end}} + +
    {{$key}}{{$value}}
    +
    +{{end}} diff --git a/src/prometheus/web/ui/templates/graph.html b/src/prometheus/web/ui/templates/graph.html new file mode 100644 index 0000000..ce53bd4 --- /dev/null +++ b/src/prometheus/web/ui/templates/graph.html @@ -0,0 +1,36 @@ +{{define "head"}} + + + + + + + + + + + + + + + + + + + + + +{{end}} + +{{define "content"}} +
    +
    + + +
    +
    + +
    +
    +
    +{{end}} diff --git a/src/prometheus/web/ui/templates/rules.html b/src/prometheus/web/ui/templates/rules.html new file mode 100644 index 0000000..17c03d7 --- /dev/null +++ b/src/prometheus/web/ui/templates/rules.html @@ -0,0 +1,37 @@ +{{define "head"}} + +{{end}} + +{{define "content"}} +
    +

    Rules

    + + {{range .RuleGroups}} + + + + + + + + + + + + {{range .Rules}} + + + + + {{end}} + {{else}} + + + + {{end}} + +

    {{.Name}}

    {{humanizeDuration .GetEvaluationTime.Seconds}}

    RuleEvaluation Time
    {{.HTMLSnippet pathPrefix}}{{humanizeDuration .GetEvaluationTime.Seconds}}
    + No rules defined +
    +
    +{{end}} diff --git a/src/prometheus/web/ui/templates/service-discovery.html b/src/prometheus/web/ui/templates/service-discovery.html new file mode 100644 index 0000000..f573338 --- /dev/null +++ b/src/prometheus/web/ui/templates/service-discovery.html @@ -0,0 +1,93 @@ +{{define "head"}} + + +{{end}} + + + +{{define "content"}} +
    + +

    Service Discovery

    +
    + +
      + {{range $i, $job := .Index}} +
    • + {{$job}} ({{ index $.Active $i }}/{{ index $.Total $i }} active targets) +
    • + {{end}} +
    + + + {{$targets := .Targets}} + {{range $i, $job := .Index}} + +
    +

    + {{$job}} + +

    + {{with index $.Dropped $i}} + {{if gt . 100 }} + + {{end}} + {{end}} +
    + + + + + + + + + {{range index $targets $job}} + + + + + {{end}} + + +
    + {{ end }} +
    + +{{end}} diff --git a/src/prometheus/web/ui/templates/status.html b/src/prometheus/web/ui/templates/status.html new file mode 100644 index 0000000..18c4cb9 --- /dev/null +++ b/src/prometheus/web/ui/templates/status.html @@ -0,0 +1,97 @@ +{{define "head"}}{{end}} + +{{define "content"}} +
    +

    Runtime Information

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Uptime{{.Birth.UTC}}
    Working Directory{{.CWD}}
    Configuration reload{{if .ReloadConfigSuccess}}Successful{{else}}Failed{{end}}
    Last successful configuration reload{{.LastConfigTime.UTC}}
    Head chunks{{.ChunkCount}}
    Head time series{{.TimeSeriesCount}}
    WAL corruptions{{.CorruptionCount}}
    Goroutines{{.GoroutineCount}}
    GOMAXPROCS{{.GOMAXPROCS}}
    GOGC{{.GOGC}}
    + +

    Build Information

    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Version{{.Version.Version}}
    Revision{{.Version.Revision}}
    Branch{{.Version.Branch}}
    BuildUser{{.Version.BuildUser}}
    BuildDate{{.Version.BuildDate}}
    GoVersion{{.Version.GoVersion}}
    + +

    Alertmanagers

    + + + + + + {{range .Alertmanagers}} + + {{/* Alertmanager URLs always have Scheme, Host and Path set */}} + + + {{end}} + +
    Endpoint
    {{.Scheme}}://{{.Host}}{{.Path}}
    + +
    +{{end}} diff --git a/src/prometheus/web/ui/templates/targets.html b/src/prometheus/web/ui/templates/targets.html new file mode 100644 index 0000000..5197dc6 --- /dev/null +++ b/src/prometheus/web/ui/templates/targets.html @@ -0,0 +1,79 @@ +{{define "head"}} + + +{{end}} + + +{{define "content"}} +
    +

    Targets

    +
    + + +
    + + {{range $job, $pool := .TargetPools}} + {{$healthy := numHealthy $pool}} + {{$total := len $pool}} + +
    +

    + {{$job}} ({{$healthy}}/{{$total}} up) + +

    + + + + + + + + + + + + {{range $pool}} + + + + + + + + {{end}} + +
    EndpointStateLabelsLast ScrapeError
    + {{.URL.Scheme}}://{{.URL.Host}}{{.URL.Path}}
    + {{range $label, $values := .URL.Query }} + {{range $i, $value := $values}} + {{$label}}="{{$value}}" + {{end}} + {{end}} +
    + + {{.Health}} + + + + {{$labels := stripLabels .Labels.Map "job"}} + {{range $label, $value := $labels}} + {{$label}}="{{$value}}" + {{else}} + none + {{end}} + + + {{if .LastScrape.IsZero}}Never{{else}}{{since .LastScrape}} ago{{end}} + + {{if .LastError}} + {{.LastError}} + {{end}} +
    +
    + {{ end }} +
    +{{end}} diff --git a/src/prometheus/web/web.go b/src/prometheus/web/web.go new file mode 100644 index 0000000..4a4c300 --- /dev/null +++ b/src/prometheus/web/web.go @@ -0,0 +1,912 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package web + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + stdlog "log" + "math" + "net" + "net/http" + "net/http/pprof" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc" + + pprof_runtime "runtime/pprof" + template_text "text/template" + + "github.com/cockroachdb/cmux" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/mwitkow/go-conntrack" + "github.com/opentracing-contrib/go-stdlib/nethttp" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "github.com/prometheus/common/route" + "github.com/prometheus/tsdb" + "golang.org/x/net/netutil" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/notifier" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/rules" + "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" + 
"github.com/prometheus/prometheus/template" + "github.com/prometheus/prometheus/util/httputil" + api_v1 "github.com/prometheus/prometheus/web/api/v1" + api_v2 "github.com/prometheus/prometheus/web/api/v2" + "github.com/prometheus/prometheus/web/ui" +) + +var localhostRepresentations = []string{"127.0.0.1", "localhost"} + +var ( + requestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "prometheus_http_request_duration_seconds", + Help: "Histogram of latencies for HTTP requests.", + Buckets: []float64{.1, .2, .4, 1, 3, 8, 20, 60, 120}, + }, + []string{"handler"}, + ) + responseSize = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "prometheus_http_response_size_bytes", + Help: "Histogram of response size for HTTP requests.", + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, + []string{"handler"}, + ) +) + +func init() { + prometheus.MustRegister(requestDuration, responseSize) +} + +// Handler serves various HTTP endpoints of the Prometheus server +type Handler struct { + logger log.Logger + + scrapeManager *scrape.Manager + ruleManager *rules.Manager + queryEngine *promql.Engine + context context.Context + tsdb func() *tsdb.DB + storage storage.Storage + notifier *notifier.Manager + + apiV1 *api_v1.API + + router *route.Router + quitCh chan struct{} + reloadCh chan chan error + options *Options + config *config.Config + configString string + versionInfo *PrometheusVersion + birth time.Time + cwd string + flagsMap map[string]string + + externalLabels model.LabelSet + mtx sync.RWMutex + now func() model.Time + + ready uint32 // ready is uint32 rather than boolean to be able to use atomic functions. +} + +// ApplyConfig updates the config field of the Handler struct +func (h *Handler) ApplyConfig(conf *config.Config) error { + h.mtx.Lock() + defer h.mtx.Unlock() + + h.config = conf + + return nil +} + +// PrometheusVersion contains build information about Prometheus. 
+type PrometheusVersion struct { + Version string `json:"version"` + Revision string `json:"revision"` + Branch string `json:"branch"` + BuildUser string `json:"buildUser"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` +} + +// Options for the web Handler. +type Options struct { + Context context.Context + TSDB func() *tsdb.DB + Storage storage.Storage + QueryEngine *promql.Engine + ScrapeManager *scrape.Manager + RuleManager *rules.Manager + Notifier *notifier.Manager + Version *PrometheusVersion + Flags map[string]string + + ListenAddress string + ReadTimeout time.Duration + MaxConnections int + ExternalURL *url.URL + RoutePrefix string + UseLocalAssets bool + UserAssetsPath string + ConsoleTemplatesPath string + ConsoleLibrariesPath string + EnableLifecycle bool + EnableAdminAPI bool +} + +func instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc { + return promhttp.InstrumentHandlerDuration( + requestDuration.MustCurryWith(prometheus.Labels{"handler": handlerName}), + promhttp.InstrumentHandlerResponseSize( + responseSize.MustCurryWith(prometheus.Labels{"handler": handlerName}), + handler, + ), + ) +} + +// New initializes a new web Handler. 
+func New(logger log.Logger, o *Options) *Handler { + router := route.New().WithInstrumentation(instrumentHandler) + cwd, err := os.Getwd() + + if err != nil { + cwd = "" + } + if logger == nil { + logger = log.NewNopLogger() + } + + h := &Handler{ + logger: logger, + router: router, + quitCh: make(chan struct{}), + reloadCh: make(chan chan error), + options: o, + versionInfo: o.Version, + birth: time.Now(), + cwd: cwd, + flagsMap: o.Flags, + + context: o.Context, + scrapeManager: o.ScrapeManager, + ruleManager: o.RuleManager, + queryEngine: o.QueryEngine, + tsdb: o.TSDB, + storage: o.Storage, + notifier: o.Notifier, + + now: model.Now, + + ready: 0, + } + + h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, h.scrapeManager, h.notifier, + func() config.Config { + h.mtx.RLock() + defer h.mtx.RUnlock() + return *h.config + }, + o.Flags, + h.testReady, + h.options.TSDB, + h.options.EnableAdminAPI, + ) + + if o.RoutePrefix != "/" { + // If the prefix is missing for the root path, prepend it. 
+ router.Get("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, o.RoutePrefix, http.StatusFound) + }) + router = router.WithPrefix(o.RoutePrefix) + } + + readyf := h.testReady + + router.Get("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound) + }) + + router.Get("/alerts", readyf(h.alerts)) + router.Get("/graph", readyf(h.graph)) + router.Get("/status", readyf(h.status)) + router.Get("/flags", readyf(h.flags)) + router.Get("/config", readyf(h.serveConfig)) + router.Get("/rules", readyf(h.rules)) + router.Get("/targets", readyf(h.targets)) + router.Get("/version", readyf(h.version)) + router.Get("/service-discovery", readyf(h.serviceDiscovery)) + + router.Get("/heap", h.dumpHeap) + + router.Get("/metrics", promhttp.Handler().ServeHTTP) + + router.Get("/federate", readyf(httputil.CompressionHandler{ + Handler: http.HandlerFunc(h.federation), + }.ServeHTTP)) + + router.Get("/consoles/*filepath", readyf(h.consoles)) + + router.Get("/static/*filepath", h.serveStaticAsset) + + if o.UserAssetsPath != "" { + router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath)) + } + + if o.EnableLifecycle { + router.Post("/-/quit", h.quit) + router.Post("/-/reload", h.reload) + } else { + router.Post("/-/quit", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusForbidden) + w.Write([]byte("Lifecycle APIs are not enabled")) + }) + router.Post("/-/reload", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusForbidden) + w.Write([]byte("Lifecycle APIs are not enabled")) + }) + } + router.Get("/-/quit", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusMethodNotAllowed) + w.Write([]byte("Only POST requests allowed")) + }) + router.Get("/-/reload", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusMethodNotAllowed) + w.Write([]byte("Only POST requests allowed")) + }) + + 
router.Get("/debug/*subpath", serveDebug) + router.Post("/debug/*subpath", serveDebug) + + router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Prometheus is Healthy.\n") + }) + router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Prometheus is Ready.\n") + })) + + return h +} + +var corsHeaders = map[string]string{ + "Access-Control-Allow-Headers": "Accept, Authorization, Content-Type, Origin", + "Access-Control-Allow-Methods": "GET, OPTIONS", + "Access-Control-Allow-Origin": "*", + "Access-Control-Expose-Headers": "Date", +} + +// Enables cross-site script calls. +func setCORS(w http.ResponseWriter) { + for h, v := range corsHeaders { + w.Header().Set(h, v) + } +} + +func serveDebug(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + subpath := route.Param(ctx, "subpath") + + if subpath == "/pprof" { + http.Redirect(w, req, req.URL.Path+"/", http.StatusMovedPermanently) + return + } + + if !strings.HasPrefix(subpath, "/pprof/") { + http.NotFound(w, req) + return + } + subpath = strings.TrimPrefix(subpath, "/pprof/") + + switch subpath { + case "cmdline": + pprof.Cmdline(w, req) + case "profile": + pprof.Profile(w, req) + case "symbol": + pprof.Symbol(w, req) + case "trace": + pprof.Trace(w, req) + default: + req.URL.Path = "/debug/pprof/" + subpath + pprof.Index(w, req) + } +} + +func (h *Handler) serveStaticAsset(w http.ResponseWriter, req *http.Request) { + fp := route.Param(req.Context(), "filepath") + fp = filepath.Join("web/ui/static", fp) + + info, err := ui.AssetInfo(fp) + if err != nil { + level.Warn(h.logger).Log("msg", "Could not get file info", "err", err, "file", fp) + w.WriteHeader(http.StatusNotFound) + return + } + file, err := ui.Asset(fp) + if err != nil { + if err != io.EOF { + level.Warn(h.logger).Log("msg", "Could not get file", "err", err, "file", fp) + } + 
w.WriteHeader(http.StatusNotFound) + return + } + + http.ServeContent(w, req, info.Name(), info.ModTime(), bytes.NewReader(file)) +} + +// Ready sets Handler to be ready. +func (h *Handler) Ready() { + atomic.StoreUint32(&h.ready, 1) +} + +// Verifies whether the server is ready or not. +func (h *Handler) isReady() bool { + ready := atomic.LoadUint32(&h.ready) + return ready > 0 +} + +// Checks if server is ready, calls f if it is, returns 503 if it is not. +func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if h.isReady() { + f(w, r) + } else { + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, "Service Unavailable") + } + } +} + +// Checks if server is ready, calls f if it is, returns 503 if it is not. +func (h *Handler) testReadyHandler(f http.Handler) http.HandlerFunc { + return h.testReady(f.ServeHTTP) +} + +// Quit returns the receive-only quit channel. +func (h *Handler) Quit() <-chan struct{} { + return h.quitCh +} + +// Reload returns the receive-only channel that signals configuration reload requests. +func (h *Handler) Reload() <-chan chan error { + return h.reloadCh +} + +// Run serves the HTTP endpoints. +func (h *Handler) Run(ctx context.Context) error { + level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress) + + listener, err := net.Listen("tcp", h.options.ListenAddress) + if err != nil { + return err + } + listener = netutil.LimitListener(listener, h.options.MaxConnections) + + // Monitor incoming connections with conntrack. 
+ listener = conntrack.NewListener(listener, + conntrack.TrackWithName("http"), + conntrack.TrackWithTracing()) + + var ( + m = cmux.New(listener) + grpcl = m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) + httpl = m.Match(cmux.HTTP1Fast()) + grpcSrv = grpc.NewServer() + ) + av2 := api_v2.New( + time.Now, + h.options.TSDB, + h.options.QueryEngine, + h.options.Storage.Querier, + func() []*scrape.Target { + return h.options.ScrapeManager.TargetsActive() + }, + func() []*url.URL { + return h.options.Notifier.Alertmanagers() + }, + h.options.EnableAdminAPI, + ) + av2.RegisterGRPC(grpcSrv) + + hh, err := av2.HTTPHandler(h.options.ListenAddress) + if err != nil { + return err + } + + hhFunc := h.testReadyHandler(hh) + + operationName := nethttp.OperationNameFunc(func(r *http.Request) string { + return fmt.Sprintf("%s %s", r.Method, r.URL.Path) + }) + mux := http.NewServeMux() + mux.Handle("/", h.router) + + av1 := route.New().WithInstrumentation(instrumentHandler) + h.apiV1.Register(av1) + apiPath := "/api" + if h.options.RoutePrefix != "/" { + apiPath = h.options.RoutePrefix + apiPath + level.Info(h.logger).Log("msg", "router prefix", "prefix", h.options.RoutePrefix) + } + + mux.Handle(apiPath+"/v1/", http.StripPrefix(apiPath+"/v1", av1)) + + mux.Handle(apiPath+"/", http.StripPrefix(apiPath, + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + setCORS(w) + hhFunc(w, r) + }), + )) + + errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0) + + httpSrv := &http.Server{ + Handler: nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName), + ErrorLog: errlog, + ReadTimeout: h.options.ReadTimeout, + } + + errCh := make(chan error) + go func() { + errCh <- httpSrv.Serve(httpl) + }() + go func() { + errCh <- grpcSrv.Serve(grpcl) + }() + go func() { + errCh <- m.Serve() + }() + + select { + case e := <-errCh: + return e + case <-ctx.Done(): + httpSrv.Shutdown(ctx) + grpcSrv.GracefulStop() + return nil + } +} + +func (h 
*Handler) alerts(w http.ResponseWriter, r *http.Request) { + alerts := h.ruleManager.AlertingRules() + alertsSorter := byAlertStateAndNameSorter{alerts: alerts} + sort.Sort(alertsSorter) + + alertStatus := AlertStatus{ + AlertingRules: alertsSorter.alerts, + AlertStateToRowClass: map[rules.AlertState]string{ + rules.StateInactive: "success", + rules.StatePending: "warning", + rules.StateFiring: "danger", + }, + } + h.executeTemplate(w, "alerts.html", alertStatus) +} + +func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + name := route.Param(ctx, "filepath") + + file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + text, err := ioutil.ReadAll(file) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Provide URL parameters as a map for easy use. Advanced users may have need for + // parameters beyond the first, so provide RawParams. 
+ rawParams, err := url.ParseQuery(r.URL.RawQuery) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + params := map[string]string{} + for k, v := range rawParams { + params[k] = v[0] + } + data := struct { + RawParams url.Values + Params map[string]string + Path string + }{ + RawParams: rawParams, + Params: params, + Path: strings.TrimLeft(name, "/"), + } + + tmpl := template.NewTemplateExpander( + h.context, + string(text), + "__console_"+name, + data, + h.now(), + template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)), + h.options.ExternalURL, + ) + filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + result, err := tmpl.ExpandHTML(filenames) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + io.WriteString(w, result) +} + +func (h *Handler) graph(w http.ResponseWriter, r *http.Request) { + h.executeTemplate(w, "graph.html", nil) +} + +func (h *Handler) status(w http.ResponseWriter, r *http.Request) { + status := struct { + Birth time.Time + CWD string + Version *PrometheusVersion + Alertmanagers []*url.URL + GoroutineCount int + GOMAXPROCS int + GOGC string + CorruptionCount int64 + ChunkCount int64 + TimeSeriesCount int64 + LastConfigTime time.Time + ReloadConfigSuccess bool + }{ + Birth: h.birth, + CWD: h.cwd, + Version: h.versionInfo, + Alertmanagers: h.notifier.Alertmanagers(), + GoroutineCount: runtime.NumGoroutine(), + GOMAXPROCS: runtime.GOMAXPROCS(0), + GOGC: os.Getenv("GOGC"), + } + metrics, err := prometheus.DefaultGatherer.Gather() + if err != nil { + http.Error(w, fmt.Sprintf("error gathering runtime status: %s", err), http.StatusInternalServerError) + return + } + for _, mF := range metrics { + switch *mF.Name { + case "prometheus_tsdb_head_chunks": + status.ChunkCount = int64(toFloat64(mF)) + case "prometheus_tsdb_head_series": + 
status.TimeSeriesCount = int64(toFloat64(mF)) + case "prometheus_tsdb_wal_corruptions_total": + status.CorruptionCount = int64(toFloat64(mF)) + case "prometheus_config_last_reload_successful": + status.ReloadConfigSuccess = toFloat64(mF) != 0 + case "prometheus_config_last_reload_success_timestamp_seconds": + status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0) + } + } + h.executeTemplate(w, "status.html", status) +} + +func toFloat64(f *io_prometheus_client.MetricFamily) float64 { + m := *f.Metric[0] + if m.Gauge != nil { + return m.Gauge.GetValue() + } + if m.Counter != nil { + return m.Counter.GetValue() + } + if m.Untyped != nil { + return m.Untyped.GetValue() + } + return math.NaN() +} + +func (h *Handler) flags(w http.ResponseWriter, r *http.Request) { + h.executeTemplate(w, "flags.html", h.flagsMap) +} + +func (h *Handler) serveConfig(w http.ResponseWriter, r *http.Request) { + h.mtx.RLock() + defer h.mtx.RUnlock() + + h.executeTemplate(w, "config.html", h.config.String()) +} + +func (h *Handler) rules(w http.ResponseWriter, r *http.Request) { + h.executeTemplate(w, "rules.html", h.ruleManager) +} + +func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) { + var index []string + targets := h.scrapeManager.TargetsAll() + for job := range targets { + index = append(index, job) + } + sort.Strings(index) + scrapeConfigData := struct { + Index []string + Targets map[string][]*scrape.Target + Active []int + Dropped []int + Total []int + }{ + Index: index, + Targets: make(map[string][]*scrape.Target), + Active: make([]int, len(index)), + Dropped: make([]int, len(index)), + Total: make([]int, len(index)), + } + for i, job := range scrapeConfigData.Index { + scrapeConfigData.Targets[job] = make([]*scrape.Target, 0, len(targets[job])) + scrapeConfigData.Total[i] = len(targets[job]) + for _, target := range targets[job] { + // Do not display more than 100 dropped targets per job to avoid + // returning too much data to the clients. 
+ if target.Labels().Len() == 0 { + scrapeConfigData.Dropped[i]++ + if scrapeConfigData.Dropped[i] > 100 { + continue + } + } else { + scrapeConfigData.Active[i]++ + } + scrapeConfigData.Targets[job] = append(scrapeConfigData.Targets[job], target) + } + } + + h.executeTemplate(w, "service-discovery.html", scrapeConfigData) +} + +func (h *Handler) targets(w http.ResponseWriter, r *http.Request) { + // Bucket targets by job label + tps := map[string][]*scrape.Target{} + for _, t := range h.scrapeManager.TargetsActive() { + job := t.Labels().Get(model.JobLabel) + tps[job] = append(tps[job], t) + } + + for _, targets := range tps { + sort.Slice(targets, func(i, j int) bool { + return targets[i].Labels().Get(labels.InstanceName) < targets[j].Labels().Get(labels.InstanceName) + }) + } + + h.executeTemplate(w, "targets.html", struct { + TargetPools map[string][]*scrape.Target + }{ + TargetPools: tps, + }) +} + +func (h *Handler) version(w http.ResponseWriter, r *http.Request) { + dec := json.NewEncoder(w) + if err := dec.Encode(h.versionInfo); err != nil { + http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError) + } +} + +func (h *Handler) quit(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Requesting termination... 
Goodbye!") + close(h.quitCh) +} + +func (h *Handler) reload(w http.ResponseWriter, r *http.Request) { + rc := make(chan error) + h.reloadCh <- rc + if err := <-rc; err != nil { + http.Error(w, fmt.Sprintf("failed to reload config: %s", err), http.StatusInternalServerError) + } +} + +func (h *Handler) consolesPath() string { + if _, err := os.Stat(h.options.ConsoleTemplatesPath + "/index.html"); !os.IsNotExist(err) { + return h.options.ExternalURL.Path + "/consoles/index.html" + } + if h.options.UserAssetsPath != "" { + if _, err := os.Stat(h.options.UserAssetsPath + "/index.html"); !os.IsNotExist(err) { + return h.options.ExternalURL.Path + "/user/index.html" + } + } + return "" +} + +func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap { + return template_text.FuncMap{ + "since": func(t time.Time) time.Duration { + return time.Since(t) / time.Millisecond * time.Millisecond + }, + "consolesPath": func() string { return consolesPath }, + "pathPrefix": func() string { return opts.ExternalURL.Path }, + "buildVersion": func() string { return opts.Version.Revision }, + "stripLabels": func(lset map[string]string, labels ...string) map[string]string { + for _, ln := range labels { + delete(lset, ln) + } + return lset + }, + "globalURL": func(u *url.URL) *url.URL { + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + return u + } + for _, lhr := range localhostRepresentations { + if host == lhr { + _, ownPort, err := net.SplitHostPort(opts.ListenAddress) + if err != nil { + return u + } + + if port == ownPort { + // Only in the case where the target is on localhost and its port is + // the same as the one we're listening on, we know for sure that + // we're monitoring our own process and that we need to change the + // scheme, hostname, and port to the externally reachable ones as + // well. 
We shouldn't need to touch the path at all, since if a + // path prefix is defined, the path under which we scrape ourselves + // should already contain the prefix. + u.Scheme = opts.ExternalURL.Scheme + u.Host = opts.ExternalURL.Host + } else { + // Otherwise, we only know that localhost is not reachable + // externally, so we replace only the hostname by the one in the + // external URL. It could be the wrong hostname for the service on + // this port, but it's still the best possible guess. + host, _, err := net.SplitHostPort(opts.ExternalURL.Host) + if err != nil { + return u + } + u.Host = host + ":" + port + } + break + } + } + return u + }, + "numHealthy": func(pool []*scrape.Target) int { + alive := len(pool) + for _, p := range pool { + if p.Health() != scrape.HealthGood { + alive-- + } + } + + return alive + }, + "healthToClass": func(th scrape.TargetHealth) string { + switch th { + case scrape.HealthUnknown: + return "warning" + case scrape.HealthGood: + return "success" + default: + return "danger" + } + }, + "alertStateToClass": func(as rules.AlertState) string { + switch as { + case rules.StateInactive: + return "success" + case rules.StatePending: + return "warning" + case rules.StateFiring: + return "danger" + default: + panic("unknown alert state") + } + }, + } +} + +func (h *Handler) getTemplate(name string) (string, error) { + baseTmpl, err := ui.Asset("web/ui/templates/_base.html") + if err != nil { + return "", fmt.Errorf("error reading base template: %s", err) + } + pageTmpl, err := ui.Asset(filepath.Join("web/ui/templates", name)) + if err != nil { + return "", fmt.Errorf("error reading page template %s: %s", name, err) + } + return string(baseTmpl) + string(pageTmpl), nil +} + +func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) { + text, err := h.getTemplate(name) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + tmpl := template.NewTemplateExpander( + h.context, + 
text, + name, + data, + h.now(), + template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)), + h.options.ExternalURL, + ) + tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options)) + + result, err := tmpl.ExpandHTML(nil) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + io.WriteString(w, result) +} + +func (h *Handler) dumpHeap(w http.ResponseWriter, r *http.Request) { + target := fmt.Sprintf("/tmp/%d.heap", time.Now().Unix()) + f, err := os.Create(target) + if err != nil { + level.Error(h.logger).Log("msg", "Could not dump heap", "err", err) + } + fmt.Fprintf(w, "Writing to %s...", target) + defer f.Close() + pprof_runtime.WriteHeapProfile(f) + fmt.Fprintf(w, "Done") +} + +// AlertStatus bundles alerting rules and the mapping of alert states to row classes. +type AlertStatus struct { + AlertingRules []*rules.AlertingRule + AlertStateToRowClass map[rules.AlertState]string +} + +type byAlertStateAndNameSorter struct { + alerts []*rules.AlertingRule +} + +func (s byAlertStateAndNameSorter) Len() int { + return len(s.alerts) +} + +func (s byAlertStateAndNameSorter) Less(i, j int) bool { + return s.alerts[i].State() > s.alerts[j].State() || + (s.alerts[i].State() == s.alerts[j].State() && + s.alerts[i].Name() < s.alerts[j].Name()) +} + +func (s byAlertStateAndNameSorter) Swap(i, j int) { + s.alerts[i], s.alerts[j] = s.alerts[j], s.alerts[i] +} diff --git a/src/prometheus/web/web_test.go b/src/prometheus/web/web_test.go new file mode 100644 index 0000000..c18c23c --- /dev/null +++ b/src/prometheus/web/web_test.go @@ -0,0 +1,304 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package web
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/prometheus/prometheus/storage/tsdb"
+	"github.com/prometheus/prometheus/util/testutil"
+	libtsdb "github.com/prometheus/tsdb"
+)
+
+// TestMain disables any system-wide HTTP proxy for loopback addresses before
+// running the suite, because the tests below dial localhost over real TCP.
+func TestMain(m *testing.M) {
+	// On linux with a global proxy the tests will fail as the go client(http,grpc) tries to connect through the proxy.
+	os.Setenv("no_proxy", "localhost,127.0.0.1,0.0.0.0,:")
+	os.Exit(m.Run())
+}
+
+// TestGlobalURL exercises the "globalURL" template function: URLs pointing at
+// localhost (and our own listening port) are rewritten to the configured
+// external URL, while other hosts are left untouched.
+func TestGlobalURL(t *testing.T) {
+	opts := &Options{
+		ListenAddress: ":9090",
+		ExternalURL: &url.URL{
+			Scheme: "https",
+			Host:   "externalhost:80",
+			Path:   "/path/prefix",
+		},
+	}
+
+	tests := []struct {
+		inURL  string
+		outURL string
+	}{
+		{
+			// Nothing should change if the input URL is not on localhost, even if the port is our listening port.
+			inURL:  "http://somehost:9090/metrics",
+			outURL: "http://somehost:9090/metrics",
+		},
+		{
+			// Port and host should change if target is on localhost and port is our listening port.
+			inURL:  "http://localhost:9090/metrics",
+			outURL: "https://externalhost:80/metrics",
+		},
+		{
+			// Only the host should change if the port is not our listening port, but the host is localhost.
+			inURL:  "http://localhost:8000/metrics",
+			outURL: "http://externalhost:8000/metrics",
+		},
+		{
+			// Alternative localhost representations should also work.
+			inURL:  "http://127.0.0.1:9090/metrics",
+			outURL: "https://externalhost:80/metrics",
+		},
+	}
+
+	for _, test := range tests {
+		inURL, err := url.Parse(test.inURL)
+
+		testutil.Ok(t, err)
+
+		// Look up the template func map entry and down-cast it to its known signature.
+		globalURL := tmplFuncs("", opts)["globalURL"].(func(u *url.URL) *url.URL)
+		outURL := globalURL(inURL)
+
+		testutil.Equals(t, test.outURL, outURL.String())
+	}
+}
+
+// TestReadyAndHealthy starts a real web handler on :9090 and checks that
+// /-/healthy answers 200 immediately, while /-/ready, /version and the v2
+// admin endpoints answer 503 until Ready() is called, then 200 afterwards.
+func TestReadyAndHealthy(t *testing.T) {
+	t.Parallel()
+	dbDir, err := ioutil.TempDir("", "tsdb-ready")
+
+	testutil.Ok(t, err)
+
+	defer os.RemoveAll(dbDir)
+	db, err := libtsdb.Open(dbDir, nil, nil, nil)
+
+	testutil.Ok(t, err)
+
+	opts := &Options{
+		ListenAddress:  ":9090",
+		ReadTimeout:    30 * time.Second,
+		MaxConnections: 512,
+		Context:        nil,
+		Storage:        &tsdb.ReadyStorage{},
+		QueryEngine:    nil,
+		ScrapeManager:  nil,
+		RuleManager:    nil,
+		Notifier:       nil,
+		RoutePrefix:    "/",
+		EnableAdminAPI: true,
+		TSDB:           func() *libtsdb.DB { return db },
+	}
+
+	opts.Flags = map[string]string{}
+
+	webHandler := New(nil, opts)
+	go func() {
+		err := webHandler.Run(context.Background())
+		if err != nil {
+			panic(fmt.Sprintf("Can't start web handler:%s", err))
+		}
+	}()
+
+	// Give some time for the web goroutine to run since we need the server
+	// to be up before starting tests.
+	// NOTE(review): a fixed sleep is inherently racy on slow/loaded machines;
+	// polling the port until it accepts connections would be more robust.
+	time.Sleep(5 * time.Second)
+
+	// NOTE(review): none of the response bodies below are drained or closed;
+	// tolerable in a short-lived test process, but it does leak connections.
+	resp, err := http.Get("http://localhost:9090/-/healthy")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/-/ready")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/version")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	// Set to ready.
+	webHandler.Ready()
+
+	resp, err = http.Get("http://localhost:9090/-/healthy")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/-/ready")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9090/version")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+}
+
+// TestRoutePrefix repeats the readiness/admin-API checks of
+// TestReadyAndHealthy, but with a non-root route prefix ("/prometheus") and a
+// separate port (:9091, so it can run in parallel with the test above).
+func TestRoutePrefix(t *testing.T) {
+	t.Parallel()
+	dbDir, err := ioutil.TempDir("", "tsdb-ready")
+
+	testutil.Ok(t, err)
+
+	defer os.RemoveAll(dbDir)
+
+	db, err := libtsdb.Open(dbDir, nil, nil, nil)
+
+	testutil.Ok(t, err)
+
+	opts := &Options{
+		ListenAddress:  ":9091",
+		ReadTimeout:    30 * time.Second,
+		MaxConnections: 512,
+		Context:        nil,
+		Storage:        &tsdb.ReadyStorage{},
+		QueryEngine:    nil,
+		ScrapeManager:  nil,
+		RuleManager:    nil,
+		Notifier:       nil,
+		RoutePrefix:    "/prometheus",
+		EnableAdminAPI: true,
+		TSDB:           func() *libtsdb.DB { return db },
+	}
+
+	opts.Flags = map[string]string{}
+
+	webHandler := New(nil, opts)
+	go func() {
+		err := webHandler.Run(context.Background())
+		if err != nil {
+			panic(fmt.Sprintf("Can't start web handler:%s", err))
+		}
+	}()
+
+	// Give some time for the web goroutine to run since we need the server
+	// to be up before starting tests.
+	// NOTE(review): same fixed-sleep startup race as in TestReadyAndHealthy.
+	time.Sleep(5 * time.Second)
+
+	resp, err := http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/healthy")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/ready")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/version")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
+
+	// Set to ready.
+	webHandler.Ready()
+
+	resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/healthy")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/ready")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/version")
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+
+	resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
+
+	testutil.Ok(t, err)
+	testutil.Equals(t, http.StatusOK, resp.StatusCode)
+}
+
+// TestDebugHandler drives the router in-process (httptest, no real listener)
+// and checks the pprof debug endpoints respond under both the root and a
+// custom route prefix, and 404 for unknown profiles or wrong prefixes.
+func TestDebugHandler(t *testing.T) {
+	for _, tc := range []struct {
+		prefix, url string
+		code        int
+	}{
+		{"/", "/debug/pprof/cmdline", 200},
+		{"/foo", "/foo/debug/pprof/cmdline", 200},
+
+		{"/", "/debug/pprof/goroutine", 200},
+		{"/foo", "/foo/debug/pprof/goroutine", 200},
+
+		{"/", "/debug/pprof/foo", 404},
+		{"/foo", "/bar/debug/pprof/goroutine", 404},
+	} {
+		opts := &Options{
+			RoutePrefix: tc.prefix,
+		}
+		handler := New(nil, opts)
+		handler.Ready()
+
+		w := httptest.NewRecorder()
+
+		req, err := http.NewRequest("GET", tc.url, nil)
+
+		testutil.Ok(t, err)
+
+		handler.router.ServeHTTP(w, req)
+
+		testutil.Equals(t, tc.code, w.Code)
+	}
+}
-- 
2.16.6